cleanup: use stat_result[stat.ST_MTIME] instead of stat_result.st_mtime...
Augie Fackler
r36922:ffa3026d default

Note: the requested changes are too big, so the diff below is truncated.

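The change itself is mechanical. In Python, attribute access on a stat result yields the modification time as a float with fractional seconds, while tuple-style indexing with stat.ST_MTIME yields the truncated integer timestamp, which is cheaper and safer to compare for equality. A minimal sketch of the distinction this commit relies on (the path is only an example):

    import os
    import stat

    st = os.stat('/etc/hosts')   # any existing path works here

    print(st.st_mtime)           # attribute access: float, e.g. 1520012345.123456
    print(st[stat.ST_MTIME])     # index access: int, e.g. 1520012345
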
--- a/hgext/extdiff.py
+++ b/hgext/extdiff.py
@@ -1,416 +1,418 @@
 # extdiff.py - external diff program support for mercurial
 #
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''command to allow external programs to compare revisions
 
 The extdiff Mercurial extension allows you to use external programs
 to compare revisions, or revision with working directory. The external
 diff programs are called with a configurable set of options and two
 non-option arguments: paths to directories containing snapshots of
 files to compare.
 
 The extdiff extension also allows you to configure new diff commands, so
 you do not need to type :hg:`extdiff -p kdiff3` always. ::
 
   [extdiff]
   # add new command that runs GNU diff(1) in 'context diff' mode
   cdiff = gdiff -Nprc5
   ## or the old way:
   #cmd.cdiff = gdiff
   #opts.cdiff = -Nprc5
 
   # add new command called meld, runs meld (no need to name twice). If
   # the meld executable is not available, the meld tool in [merge-tools]
   # will be used, if available
   meld =
 
   # add new command called vimdiff, runs gvimdiff with DirDiff plugin
   # (see http://www.vim.org/scripts/script.php?script_id=102) Non
   # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
   # your .vimrc
   vimdiff = gvim -f "+next" \\
             "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
 
 Tool arguments can include variables that are expanded at runtime::
 
   $parent1, $plabel1 - filename, descriptive label of first parent
   $child,   $clabel  - filename, descriptive label of child revision
   $parent2, $plabel2 - filename, descriptive label of second parent
   $root              - repository root
   $parent is an alias for $parent1.
 
 The extdiff extension will look in your [diff-tools] and [merge-tools]
 sections for diff tool arguments, when none are specified in [extdiff].
 
 ::
 
   [extdiff]
   kdiff3 =
 
   [diff-tools]
   kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
 
 You can use -I/-X and list of file or directory names like normal
 :hg:`diff` command. The extdiff extension makes snapshots of only
 needed files, so running the external diff program will actually be
 pretty fast (at least faster than having to compare the entire tree).
 '''
 
 from __future__ import absolute_import
 
 import os
 import re
 import shutil
+import stat
 import tempfile
 from mercurial.i18n import _
 from mercurial.node import (
     nullid,
     short,
 )
 from mercurial import (
     archival,
     cmdutil,
     error,
     filemerge,
     pycompat,
     registrar,
     scmutil,
     util,
 )
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
 configitem('extdiff', br'opts\..*',
     default='',
     generic=True,
 )
 
 configitem('diff-tools', br'.*\.diffargs$',
     default=None,
     generic=True,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
     '''snapshot files as of some revision
     if not using snapshot, -I/-X does not work and recursive diff
     in tools like kdiff3 and meld displays too many files.'''
     dirname = os.path.basename(repo.root)
     if dirname == "":
         dirname = "root"
     if node is not None:
         dirname = '%s.%s' % (dirname, short(node))
     base = os.path.join(tmproot, dirname)
     os.mkdir(base)
     fnsandstat = []
 
     if node is not None:
         ui.note(_('making snapshot of %d files from rev %s\n') %
                 (len(files), short(node)))
     else:
         ui.note(_('making snapshot of %d files from working directory\n') %
                 (len(files)))
 
     if files:
         repo.ui.setconfig("ui", "archivemeta", False)
 
         archival.archive(repo, base, node, 'files',
                          matchfn=scmutil.matchfiles(repo, files),
                          subrepos=listsubrepos)
 
         for fn in sorted(files):
             wfn = util.pconvert(fn)
             ui.note('  %s\n' % wfn)
 
             if node is None:
                 dest = os.path.join(base, wfn)
 
                 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
     return dirname, fnsandstat
 
 def dodiff(ui, repo, cmdline, pats, opts):
     '''Do the actual diff:
 
     - copy to a temp structure if diffing 2 internal revisions
     - copy to a temp structure if diffing working revision with
       another one and more than 1 file is changed
     - just invoke the diff for a single file in the working dir
     '''
 
     revs = opts.get('rev')
     change = opts.get('change')
     do3way = '$parent2' in cmdline
 
     if revs and change:
         msg = _('cannot specify --rev and --change at the same time')
         raise error.Abort(msg)
     elif change:
         node2 = scmutil.revsingle(repo, change, None).node()
         node1a, node1b = repo.changelog.parents(node2)
     else:
         node1a, node2 = scmutil.revpair(repo, revs)
         if not revs:
             node1b = repo.dirstate.p2()
         else:
             node1b = nullid
 
     # Disable 3-way merge if there is only one parent
     if do3way:
         if node1b == nullid:
             do3way = False
 
     subrepos=opts.get('subrepos')
 
     matcher = scmutil.match(repo[node2], pats, opts)
 
     if opts.get('patch'):
         if subrepos:
             raise error.Abort(_('--patch cannot be used with --subrepos'))
         if node2 is None:
             raise error.Abort(_('--patch requires two revisions'))
     else:
         mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
                                                    listsubrepos=subrepos)[:3])
         if do3way:
             mod_b, add_b, rem_b = map(set,
                                       repo.status(node1b, node2, matcher,
                                                   listsubrepos=subrepos)[:3])
         else:
             mod_b, add_b, rem_b = set(), set(), set()
         modadd = mod_a | add_a | mod_b | add_b
         common = modadd | rem_a | rem_b
         if not common:
             return 0
 
     tmproot = tempfile.mkdtemp(prefix='extdiff.')
     try:
         if not opts.get('patch'):
             # Always make a copy of node1a (and node1b, if applicable)
             dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
             dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                              subrepos)[0]
             rev1a = '@%d' % repo[node1a].rev()
             if do3way:
                 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                  subrepos)[0]
                 rev1b = '@%d' % repo[node1b].rev()
             else:
                 dir1b = None
                 rev1b = ''
 
             fnsandstat = []
 
             # If node2 in not the wc or there is >1 change, copy it
             dir2root = ''
             rev2 = ''
             if node2:
                 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
                 rev2 = '@%d' % repo[node2].rev()
             elif len(common) > 1:
                 #we only actually need to get the files to copy back to
                 #the working dir in this case (because the other cases
                 #are: diffing 2 revisions or single file -- in which case
                 #the file is already directly passed to the diff tool).
                 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
                                             subrepos)
             else:
                 # This lets the diff tool open the changed file directly
                 dir2 = ''
                 dir2root = repo.root
 
             label1a = rev1a
             label1b = rev1b
             label2 = rev2
 
             # If only one change, diff the files instead of the directories
             # Handle bogus modifies correctly by checking if the files exist
             if len(common) == 1:
                 common_file = util.localpath(common.pop())
                 dir1a = os.path.join(tmproot, dir1a, common_file)
                 label1a = common_file + rev1a
                 if not os.path.isfile(dir1a):
                     dir1a = os.devnull
                 if do3way:
                     dir1b = os.path.join(tmproot, dir1b, common_file)
                     label1b = common_file + rev1b
                     if not os.path.isfile(dir1b):
                         dir1b = os.devnull
                 dir2 = os.path.join(dir2root, dir2, common_file)
                 label2 = common_file + rev2
         else:
             template = 'hg-%h.patch'
             cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
                            fntemplate=repo.vfs.reljoin(tmproot, template),
                            match=matcher)
             label1a = cmdutil.makefilename(repo[node1a], template)
             label2 = cmdutil.makefilename(repo[node2], template)
             dir1a = repo.vfs.reljoin(tmproot, label1a)
             dir2 = repo.vfs.reljoin(tmproot, label2)
             dir1b = None
             label1b = None
             fnsandstat = []
 
         # Function to quote file/dir names in the argument string.
         # When not operating in 3-way mode, an empty string is
         # returned for parent2
         replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                    'plabel1': label1a, 'plabel2': label1b,
                    'clabel': label2, 'child': dir2,
                    'root': repo.root}
         def quote(match):
             pre = match.group(2)
             key = match.group(3)
             if not do3way and key == 'parent2':
                 return pre
             return pre + util.shellquote(replace[key])
 
         # Match parent2 first, so 'parent1?' will match both parent1 and parent
         regex = (br'''(['"]?)([^\s'"$]*)'''
                  br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
         if not do3way and not re.search(regex, cmdline):
             cmdline += ' $parent1 $child'
         cmdline = re.sub(regex, quote, cmdline)
 
         ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
         ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
 
         for copy_fn, working_fn, st in fnsandstat:
             cpstat = os.lstat(copy_fn)
             # Some tools copy the file and attributes, so mtime may not detect
             # all changes. A size check will detect more cases, but not all.
             # The only certain way to detect every case is to diff all files,
             # which could be expensive.
             # copyfile() carries over the permission, so the mode check could
             # be in an 'elif' branch, but for the case where the file has
             # changed without affecting mtime or size.
-            if (cpstat.st_mtime != st.st_mtime or cpstat.st_size != st.st_size
+            if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
+                or cpstat.st_size != st.st_size
                 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
                 ui.debug('file changed while diffing. '
                          'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                 util.copyfile(copy_fn, working_fn)
 
         return 1
     finally:
         ui.note(_('cleaning up temp directory\n'))
         shutil.rmtree(tmproot)
 
 extdiffopts = [
     ('o', 'option', [],
      _('pass option to comparison program'), _('OPT')),
     ('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV')),
     ('', 'patch', None, _('compare patches for two revisions'))
     ] + cmdutil.walkopts + cmdutil.subrepoopts
 
 @command('extdiff',
     [('p', 'program', '', _('comparison program to run'), _('CMD')),
     ] + extdiffopts,
     _('hg extdiff [OPT]... [FILE]...'),
     inferrepo=True)
 def extdiff(ui, repo, *pats, **opts):
     '''use external program to diff repository (or selected files)
 
     Show differences between revisions for the specified files, using
     an external program. The default program used is diff, with
     default options "-Npru".
 
     To select a different program, use the -p/--program option. The
     program will be passed the names of two directories to compare. To
     pass additional options to the program, use -o/--option. These
     will be passed before the names of the directories to compare.
 
     When two revision arguments are given, then changes are shown
     between those revisions. If only one revision is specified then
     that revision is compared to the working directory, and, when no
     revisions are specified, the working directory files are compared
     to its parent.'''
     opts = pycompat.byteskwargs(opts)
     program = opts.get('program')
     option = opts.get('option')
     if not program:
         program = 'diff'
         option = option or ['-Npru']
     cmdline = ' '.join(map(util.shellquote, [program] + option))
     return dodiff(ui, repo, cmdline, pats, opts)
 
 class savedcmd(object):
     """use external program to diff repository (or selected files)
 
     Show differences between revisions for the specified files, using
     the following program::
 
         %(path)s
 
     When two revision arguments are given, then changes are shown
     between those revisions. If only one revision is specified then
     that revision is compared to the working directory, and, when no
     revisions are specified, the working directory files are compared
     to its parent.
     """
 
     def __init__(self, path, cmdline):
         # We can't pass non-ASCII through docstrings (and path is
         # in an unknown encoding anyway)
         docpath = util.escapestr(path)
         self.__doc__ %= {r'path': pycompat.sysstr(util.uirepr(docpath))}
         self._cmdline = cmdline
 
     def __call__(self, ui, repo, *pats, **opts):
         opts = pycompat.byteskwargs(opts)
         options = ' '.join(map(util.shellquote, opts['option']))
         if options:
             options = ' ' + options
         return dodiff(ui, repo, self._cmdline + options, pats, opts)
 
 def uisetup(ui):
     for cmd, path in ui.configitems('extdiff'):
         path = util.expandpath(path)
         if cmd.startswith('cmd.'):
             cmd = cmd[4:]
             if not path:
                 path = util.findexe(cmd)
                 if path is None:
                     path = filemerge.findexternaltool(ui, cmd) or cmd
             diffopts = ui.config('extdiff', 'opts.' + cmd)
             cmdline = util.shellquote(path)
             if diffopts:
                 cmdline += ' ' + diffopts
         elif cmd.startswith('opts.'):
             continue
         else:
             if path:
                 # case "cmd = path opts"
                 cmdline = path
                 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
             else:
                 # case "cmd ="
                 path = util.findexe(cmd)
                 if path is None:
                     path = filemerge.findexternaltool(ui, cmd) or cmd
                 cmdline = util.shellquote(path)
                 diffopts = False
         # look for diff arguments in [diff-tools] then [merge-tools]
         if not diffopts:
             args = ui.config('diff-tools', cmd+'.diffargs') or \
                    ui.config('merge-tools', cmd+'.diffargs')
             if args:
                 cmdline += ' ' + args
         command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
                 inferrepo=True)(savedcmd(path, cmdline))
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [savedcmd]
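
An aside for readers of the hunk above: the changed-file check in dodiff() can be read as a standalone predicate. This is a sketch only; snapshotchanged is a hypothetical name (extdiff keeps this logic inline), and st is assumed to be the os.lstat() result captured when the snapshot copy was made:

    import os
    import stat

    def snapshotchanged(copy_fn, st):
        # Re-stat the snapshot copy after the external tool has run.
        cpstat = os.lstat(copy_fn)
        # Integer mtimes compare exactly; size catches same-mtime edits;
        # the 0o100 bit detects a flipped owner-execute permission.
        return (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
                or cpstat.st_size != st.st_size
                or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100))
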
@@ -1,1060 +1,1061 b''
1 # shelve.py - save/restore working directory state
1 # shelve.py - save/restore working directory state
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """save and restore changes to the working directory
8 """save and restore changes to the working directory
9
9
10 The "hg shelve" command saves changes made to the working directory
10 The "hg shelve" command saves changes made to the working directory
11 and reverts those changes, resetting the working directory to a clean
11 and reverts those changes, resetting the working directory to a clean
12 state.
12 state.
13
13
14 Later on, the "hg unshelve" command restores the changes saved by "hg
14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 shelve". Changes can be restored even after updating to a different
15 shelve". Changes can be restored even after updating to a different
16 parent, in which case Mercurial's merge machinery will resolve any
16 parent, in which case Mercurial's merge machinery will resolve any
17 conflicts if necessary.
17 conflicts if necessary.
18
18
19 You can have more than one shelved change outstanding at a time; each
19 You can have more than one shelved change outstanding at a time; each
20 shelved change has a distinct name. For details, see the help for "hg
20 shelved change has a distinct name. For details, see the help for "hg
21 shelve".
21 shelve".
22 """
22 """
23 from __future__ import absolute_import
23 from __future__ import absolute_import
24
24
25 import collections
25 import collections
26 import errno
26 import errno
27 import itertools
27 import itertools
28 import stat
28
29
29 from mercurial.i18n import _
30 from mercurial.i18n import _
30 from mercurial import (
31 from mercurial import (
31 bookmarks,
32 bookmarks,
32 bundle2,
33 bundle2,
33 bundlerepo,
34 bundlerepo,
34 changegroup,
35 changegroup,
35 cmdutil,
36 cmdutil,
36 discovery,
37 discovery,
37 error,
38 error,
38 exchange,
39 exchange,
39 hg,
40 hg,
40 lock as lockmod,
41 lock as lockmod,
41 mdiff,
42 mdiff,
42 merge,
43 merge,
43 node as nodemod,
44 node as nodemod,
44 patch,
45 patch,
45 phases,
46 phases,
46 pycompat,
47 pycompat,
47 registrar,
48 registrar,
48 repair,
49 repair,
49 scmutil,
50 scmutil,
50 templatefilters,
51 templatefilters,
51 util,
52 util,
52 vfs as vfsmod,
53 vfs as vfsmod,
53 )
54 )
54
55
55 from . import (
56 from . import (
56 rebase,
57 rebase,
57 )
58 )
58 from mercurial.utils import dateutil
59 from mercurial.utils import dateutil
59
60
60 cmdtable = {}
61 cmdtable = {}
61 command = registrar.command(cmdtable)
62 command = registrar.command(cmdtable)
62 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
63 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
63 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
64 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
64 # be specifying the version(s) of Mercurial they are tested with, or
65 # be specifying the version(s) of Mercurial they are tested with, or
65 # leave the attribute unspecified.
66 # leave the attribute unspecified.
66 testedwith = 'ships-with-hg-core'
67 testedwith = 'ships-with-hg-core'
67
68
68 configtable = {}
69 configtable = {}
69 configitem = registrar.configitem(configtable)
70 configitem = registrar.configitem(configtable)
70
71
71 configitem('shelve', 'maxbackups',
72 configitem('shelve', 'maxbackups',
72 default=10,
73 default=10,
73 )
74 )
74
75
75 backupdir = 'shelve-backup'
76 backupdir = 'shelve-backup'
76 shelvedir = 'shelved'
77 shelvedir = 'shelved'
77 shelvefileextensions = ['hg', 'patch', 'oshelve']
78 shelvefileextensions = ['hg', 'patch', 'oshelve']
78 # universal extension is present in all types of shelves
79 # universal extension is present in all types of shelves
79 patchextension = 'patch'
80 patchextension = 'patch'
80
81
81 # we never need the user, so we use a
82 # we never need the user, so we use a
82 # generic user for all shelve operations
83 # generic user for all shelve operations
83 shelveuser = 'shelve@localhost'
84 shelveuser = 'shelve@localhost'
84
85
85 class shelvedfile(object):
86 class shelvedfile(object):
86 """Helper for the file storing a single shelve
87 """Helper for the file storing a single shelve
87
88
88 Handles common functions on shelve files (.hg/.patch) using
89 Handles common functions on shelve files (.hg/.patch) using
89 the vfs layer"""
90 the vfs layer"""
90 def __init__(self, repo, name, filetype=None):
91 def __init__(self, repo, name, filetype=None):
91 self.repo = repo
92 self.repo = repo
92 self.name = name
93 self.name = name
93 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
94 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
94 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
95 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
95 self.ui = self.repo.ui
96 self.ui = self.repo.ui
96 if filetype:
97 if filetype:
97 self.fname = name + '.' + filetype
98 self.fname = name + '.' + filetype
98 else:
99 else:
99 self.fname = name
100 self.fname = name
100
101
101 def exists(self):
102 def exists(self):
102 return self.vfs.exists(self.fname)
103 return self.vfs.exists(self.fname)
103
104
104 def filename(self):
105 def filename(self):
105 return self.vfs.join(self.fname)
106 return self.vfs.join(self.fname)
106
107
107 def backupfilename(self):
108 def backupfilename(self):
108 def gennames(base):
109 def gennames(base):
109 yield base
110 yield base
110 base, ext = base.rsplit('.', 1)
111 base, ext = base.rsplit('.', 1)
111 for i in itertools.count(1):
112 for i in itertools.count(1):
112 yield '%s-%d.%s' % (base, i, ext)
113 yield '%s-%d.%s' % (base, i, ext)
113
114
114 name = self.backupvfs.join(self.fname)
115 name = self.backupvfs.join(self.fname)
115 for n in gennames(name):
116 for n in gennames(name):
116 if not self.backupvfs.exists(n):
117 if not self.backupvfs.exists(n):
117 return n
118 return n
118
119
119 def movetobackup(self):
120 def movetobackup(self):
120 if not self.backupvfs.isdir():
121 if not self.backupvfs.isdir():
121 self.backupvfs.makedir()
122 self.backupvfs.makedir()
122 util.rename(self.filename(), self.backupfilename())
123 util.rename(self.filename(), self.backupfilename())
123
124
124 def stat(self):
125 def stat(self):
125 return self.vfs.stat(self.fname)
126 return self.vfs.stat(self.fname)
126
127
127 def opener(self, mode='rb'):
128 def opener(self, mode='rb'):
128 try:
129 try:
129 return self.vfs(self.fname, mode)
130 return self.vfs(self.fname, mode)
130 except IOError as err:
131 except IOError as err:
131 if err.errno != errno.ENOENT:
132 if err.errno != errno.ENOENT:
132 raise
133 raise
133 raise error.Abort(_("shelved change '%s' not found") % self.name)
134 raise error.Abort(_("shelved change '%s' not found") % self.name)
134
135
135 def applybundle(self):
136 def applybundle(self):
136 fp = self.opener()
137 fp = self.opener()
137 try:
138 try:
138 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
139 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
139 bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
140 bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
140 source='unshelve',
141 source='unshelve',
141 url='bundle:' + self.vfs.join(self.fname),
142 url='bundle:' + self.vfs.join(self.fname),
142 targetphase=phases.secret)
143 targetphase=phases.secret)
143 finally:
144 finally:
144 fp.close()
145 fp.close()
145
146
146 def bundlerepo(self):
147 def bundlerepo(self):
147 return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
148 return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
148 self.vfs.join(self.fname))
149 self.vfs.join(self.fname))
149 def writebundle(self, bases, node):
150 def writebundle(self, bases, node):
150 cgversion = changegroup.safeversion(self.repo)
151 cgversion = changegroup.safeversion(self.repo)
151 if cgversion == '01':
152 if cgversion == '01':
152 btype = 'HG10BZ'
153 btype = 'HG10BZ'
153 compression = None
154 compression = None
154 else:
155 else:
155 btype = 'HG20'
156 btype = 'HG20'
156 compression = 'BZ'
157 compression = 'BZ'
157
158
158 outgoing = discovery.outgoing(self.repo, missingroots=bases,
159 outgoing = discovery.outgoing(self.repo, missingroots=bases,
159 missingheads=[node])
160 missingheads=[node])
160 cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
161 cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
161 'shelve')
162 'shelve')
162
163
163 bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
164 bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
164 compression=compression)
165 compression=compression)
165
166
166 def writeobsshelveinfo(self, info):
167 def writeobsshelveinfo(self, info):
167 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
168 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
168
169
169 def readobsshelveinfo(self):
170 def readobsshelveinfo(self):
170 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
171 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
171
172
172 class shelvedstate(object):
173 class shelvedstate(object):
173 """Handle persistence during unshelving operations.
174 """Handle persistence during unshelving operations.
174
175
175 Handles saving and restoring a shelved state. Ensures that different
176 Handles saving and restoring a shelved state. Ensures that different
176 versions of a shelved state are possible and handles them appropriately.
177 versions of a shelved state are possible and handles them appropriately.
177 """
178 """
178 _version = 2
179 _version = 2
179 _filename = 'shelvedstate'
180 _filename = 'shelvedstate'
180 _keep = 'keep'
181 _keep = 'keep'
181 _nokeep = 'nokeep'
182 _nokeep = 'nokeep'
182 # colon is essential to differentiate from a real bookmark name
183 # colon is essential to differentiate from a real bookmark name
183 _noactivebook = ':no-active-bookmark'
184 _noactivebook = ':no-active-bookmark'
184
185
185 @classmethod
186 @classmethod
186 def _verifyandtransform(cls, d):
187 def _verifyandtransform(cls, d):
187 """Some basic shelvestate syntactic verification and transformation"""
188 """Some basic shelvestate syntactic verification and transformation"""
188 try:
189 try:
189 d['originalwctx'] = nodemod.bin(d['originalwctx'])
190 d['originalwctx'] = nodemod.bin(d['originalwctx'])
190 d['pendingctx'] = nodemod.bin(d['pendingctx'])
191 d['pendingctx'] = nodemod.bin(d['pendingctx'])
191 d['parents'] = [nodemod.bin(h)
192 d['parents'] = [nodemod.bin(h)
192 for h in d['parents'].split(' ')]
193 for h in d['parents'].split(' ')]
193 d['nodestoremove'] = [nodemod.bin(h)
194 d['nodestoremove'] = [nodemod.bin(h)
194 for h in d['nodestoremove'].split(' ')]
195 for h in d['nodestoremove'].split(' ')]
195 except (ValueError, TypeError, KeyError) as err:
196 except (ValueError, TypeError, KeyError) as err:
196 raise error.CorruptedState(pycompat.bytestr(err))
197 raise error.CorruptedState(pycompat.bytestr(err))
197
198
198 @classmethod
199 @classmethod
199 def _getversion(cls, repo):
200 def _getversion(cls, repo):
200 """Read version information from shelvestate file"""
201 """Read version information from shelvestate file"""
201 fp = repo.vfs(cls._filename)
202 fp = repo.vfs(cls._filename)
202 try:
203 try:
203 version = int(fp.readline().strip())
204 version = int(fp.readline().strip())
204 except ValueError as err:
205 except ValueError as err:
205 raise error.CorruptedState(pycompat.bytestr(err))
206 raise error.CorruptedState(pycompat.bytestr(err))
206 finally:
207 finally:
207 fp.close()
208 fp.close()
208 return version
209 return version
209
210
210 @classmethod
211 @classmethod
211 def _readold(cls, repo):
212 def _readold(cls, repo):
212 """Read the old position-based version of a shelvestate file"""
213 """Read the old position-based version of a shelvestate file"""
213 # Order is important, because old shelvestate file uses it
214 # Order is important, because old shelvestate file uses it
214 # to detemine values of fields (i.g. name is on the second line,
215 # to detemine values of fields (i.g. name is on the second line,
215 # originalwctx is on the third and so forth). Please do not change.
216 # originalwctx is on the third and so forth). Please do not change.
216 keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
217 keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
217 'nodestoremove', 'branchtorestore', 'keep', 'activebook']
218 'nodestoremove', 'branchtorestore', 'keep', 'activebook']
218 # this is executed only seldomly, so it is not a big deal
219 # this is executed only seldomly, so it is not a big deal
219 # that we open this file twice
220 # that we open this file twice
220 fp = repo.vfs(cls._filename)
221 fp = repo.vfs(cls._filename)
221 d = {}
222 d = {}
222 try:
223 try:
223 for key in keys:
224 for key in keys:
224 d[key] = fp.readline().strip()
225 d[key] = fp.readline().strip()
225 finally:
226 finally:
226 fp.close()
227 fp.close()
227 return d
228 return d
228
229
229 @classmethod
230 @classmethod
230 def load(cls, repo):
231 def load(cls, repo):
231 version = cls._getversion(repo)
232 version = cls._getversion(repo)
232 if version < cls._version:
233 if version < cls._version:
233 d = cls._readold(repo)
234 d = cls._readold(repo)
234 elif version == cls._version:
235 elif version == cls._version:
235 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
236 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
236 .read(firstlinenonkeyval=True)
237 .read(firstlinenonkeyval=True)
237 else:
238 else:
238 raise error.Abort(_('this version of shelve is incompatible '
239 raise error.Abort(_('this version of shelve is incompatible '
239 'with the version used in this repo'))
240 'with the version used in this repo'))
240
241
241 cls._verifyandtransform(d)
242 cls._verifyandtransform(d)
242 try:
243 try:
243 obj = cls()
244 obj = cls()
244 obj.name = d['name']
245 obj.name = d['name']
245 obj.wctx = repo[d['originalwctx']]
246 obj.wctx = repo[d['originalwctx']]
246 obj.pendingctx = repo[d['pendingctx']]
247 obj.pendingctx = repo[d['pendingctx']]
247 obj.parents = d['parents']
248 obj.parents = d['parents']
248 obj.nodestoremove = d['nodestoremove']
249 obj.nodestoremove = d['nodestoremove']
249 obj.branchtorestore = d.get('branchtorestore', '')
250 obj.branchtorestore = d.get('branchtorestore', '')
250 obj.keep = d.get('keep') == cls._keep
251 obj.keep = d.get('keep') == cls._keep
251 obj.activebookmark = ''
252 obj.activebookmark = ''
252 if d.get('activebook', '') != cls._noactivebook:
253 if d.get('activebook', '') != cls._noactivebook:
253 obj.activebookmark = d.get('activebook', '')
254 obj.activebookmark = d.get('activebook', '')
254 except (error.RepoLookupError, KeyError) as err:
255 except (error.RepoLookupError, KeyError) as err:
255 raise error.CorruptedState(pycompat.bytestr(err))
256 raise error.CorruptedState(pycompat.bytestr(err))
256
257
257 return obj
258 return obj
258
259
259 @classmethod
260 @classmethod
260 def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
261 def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
261 branchtorestore, keep=False, activebook=''):
262 branchtorestore, keep=False, activebook=''):
262 info = {
263 info = {
263 "name": name,
264 "name": name,
264 "originalwctx": nodemod.hex(originalwctx.node()),
265 "originalwctx": nodemod.hex(originalwctx.node()),
265 "pendingctx": nodemod.hex(pendingctx.node()),
266 "pendingctx": nodemod.hex(pendingctx.node()),
266 "parents": ' '.join([nodemod.hex(p)
267 "parents": ' '.join([nodemod.hex(p)
267 for p in repo.dirstate.parents()]),
268 for p in repo.dirstate.parents()]),
268 "nodestoremove": ' '.join([nodemod.hex(n)
269 "nodestoremove": ' '.join([nodemod.hex(n)
269 for n in nodestoremove]),
270 for n in nodestoremove]),
270 "branchtorestore": branchtorestore,
271 "branchtorestore": branchtorestore,
271 "keep": cls._keep if keep else cls._nokeep,
272 "keep": cls._keep if keep else cls._nokeep,
272 "activebook": activebook or cls._noactivebook
273 "activebook": activebook or cls._noactivebook
273 }
274 }
274 scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
275 scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
275 .write(info, firstline=("%d" % cls._version))
276 .write(info, firstline=("%d" % cls._version))
276
277
277 @classmethod
278 @classmethod
278 def clear(cls, repo):
279 def clear(cls, repo):
279 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
280 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
280
281
281 def cleanupoldbackups(repo):
282 def cleanupoldbackups(repo):
282 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
283 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
283 maxbackups = repo.ui.configint('shelve', 'maxbackups')
284 maxbackups = repo.ui.configint('shelve', 'maxbackups')
284 hgfiles = [f for f in vfs.listdir()
285 hgfiles = [f for f in vfs.listdir()
285 if f.endswith('.' + patchextension)]
286 if f.endswith('.' + patchextension)]
286 hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
287 hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
287 if 0 < maxbackups and maxbackups < len(hgfiles):
288 if 0 < maxbackups and maxbackups < len(hgfiles):
288 bordermtime = hgfiles[-maxbackups][0]
289 bordermtime = hgfiles[-maxbackups][0]
289 else:
290 else:
290 bordermtime = None
291 bordermtime = None
291 for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
292 for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
292 if mtime == bordermtime:
293 if mtime == bordermtime:
293 # keep it, because timestamp can't decide exact order of backups
294 # keep it, because timestamp can't decide exact order of backups
294 continue
295 continue
295 base = f[:-(1 + len(patchextension))]
296 base = f[:-(1 + len(patchextension))]
296 for ext in shelvefileextensions:
297 for ext in shelvefileextensions:
297 vfs.tryunlink(base + '.' + ext)
298 vfs.tryunlink(base + '.' + ext)
298
299
299 def _backupactivebookmark(repo):
300 def _backupactivebookmark(repo):
300 activebookmark = repo._activebookmark
301 activebookmark = repo._activebookmark
301 if activebookmark:
302 if activebookmark:
302 bookmarks.deactivate(repo)
303 bookmarks.deactivate(repo)
303 return activebookmark
304 return activebookmark
304
305
305 def _restoreactivebookmark(repo, mark):
306 def _restoreactivebookmark(repo, mark):
306 if mark:
307 if mark:
307 bookmarks.activate(repo, mark)
308 bookmarks.activate(repo, mark)
308
309
309 def _aborttransaction(repo):
310 def _aborttransaction(repo):
310 '''Abort current transaction for shelve/unshelve, but keep dirstate
311 '''Abort current transaction for shelve/unshelve, but keep dirstate
311 '''
312 '''
312 tr = repo.currenttransaction()
313 tr = repo.currenttransaction()
313 backupname = 'dirstate.shelve'
314 backupname = 'dirstate.shelve'
314 repo.dirstate.savebackup(tr, backupname)
315 repo.dirstate.savebackup(tr, backupname)
315 tr.abort()
316 tr.abort()
316 repo.dirstate.restorebackup(None, backupname)
317 repo.dirstate.restorebackup(None, backupname)
317
318
318 def createcmd(ui, repo, pats, opts):
319 def createcmd(ui, repo, pats, opts):
319 """subcommand that creates a new shelve"""
320 """subcommand that creates a new shelve"""
320 with repo.wlock():
321 with repo.wlock():
321 cmdutil.checkunfinished(repo)
322 cmdutil.checkunfinished(repo)
322 return _docreatecmd(ui, repo, pats, opts)
323 return _docreatecmd(ui, repo, pats, opts)
323
324
324 def getshelvename(repo, parent, opts):
325 def getshelvename(repo, parent, opts):
325 """Decide on the name this shelve is going to have"""
326 """Decide on the name this shelve is going to have"""
326 def gennames():
327 def gennames():
327 yield label
328 yield label
328 for i in itertools.count(1):
329 for i in itertools.count(1):
329 yield '%s-%02d' % (label, i)
330 yield '%s-%02d' % (label, i)
330 name = opts.get('name')
331 name = opts.get('name')
331 label = repo._activebookmark or parent.branch() or 'default'
332 label = repo._activebookmark or parent.branch() or 'default'
332 # slashes aren't allowed in filenames, therefore we rename it
333 # slashes aren't allowed in filenames, therefore we rename it
333 label = label.replace('/', '_')
334 label = label.replace('/', '_')
334 label = label.replace('\\', '_')
335 label = label.replace('\\', '_')
335 # filenames must not start with '.' as it should not be hidden
336 # filenames must not start with '.' as it should not be hidden
336 if label.startswith('.'):
337 if label.startswith('.'):
337 label = label.replace('.', '_', 1)
338 label = label.replace('.', '_', 1)
338
339
339 if name:
340 if name:
340 if shelvedfile(repo, name, patchextension).exists():
341 if shelvedfile(repo, name, patchextension).exists():
341 e = _("a shelved change named '%s' already exists") % name
342 e = _("a shelved change named '%s' already exists") % name
342 raise error.Abort(e)
343 raise error.Abort(e)
343
344
344 # ensure we are not creating a subdirectory or a hidden file
345 # ensure we are not creating a subdirectory or a hidden file
345 if '/' in name or '\\' in name:
346 if '/' in name or '\\' in name:
346 raise error.Abort(_('shelved change names can not contain slashes'))
347 raise error.Abort(_('shelved change names can not contain slashes'))
347 if name.startswith('.'):
348 if name.startswith('.'):
348 raise error.Abort(_("shelved change names can not start with '.'"))
349 raise error.Abort(_("shelved change names can not start with '.'"))
349
350
350 else:
351 else:
351 for n in gennames():
352 for n in gennames():
352 if not shelvedfile(repo, n, patchextension).exists():
353 if not shelvedfile(repo, n, patchextension).exists():
353 name = n
354 name = n
354 break
355 break
355
356
356 return name
357 return name
357
358
358 def mutableancestors(ctx):
359 def mutableancestors(ctx):
359 """return all mutable ancestors for ctx (included)
360 """return all mutable ancestors for ctx (included)
360
361
361 Much faster than the revset ancestors(ctx) & draft()"""
362 Much faster than the revset ancestors(ctx) & draft()"""
362 seen = {nodemod.nullrev}
363 seen = {nodemod.nullrev}
363 visit = collections.deque()
364 visit = collections.deque()
364 visit.append(ctx)
365 visit.append(ctx)
365 while visit:
366 while visit:
366 ctx = visit.popleft()
367 ctx = visit.popleft()
367 yield ctx.node()
368 yield ctx.node()
368 for parent in ctx.parents():
369 for parent in ctx.parents():
369 rev = parent.rev()
370 rev = parent.rev()
370 if rev not in seen:
371 if rev not in seen:
371 seen.add(rev)
372 seen.add(rev)
372 if parent.mutable():
373 if parent.mutable():
373 visit.append(parent)
374 visit.append(parent)
374
375
375 def getcommitfunc(extra, interactive, editor=False):
376 def getcommitfunc(extra, interactive, editor=False):
376 def commitfunc(ui, repo, message, match, opts):
377 def commitfunc(ui, repo, message, match, opts):
377 hasmq = util.safehasattr(repo, 'mq')
378 hasmq = util.safehasattr(repo, 'mq')
378 if hasmq:
379 if hasmq:
379 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
380 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
380 overrides = {('phases', 'new-commit'): phases.secret}
381 overrides = {('phases', 'new-commit'): phases.secret}
381 try:
382 try:
382 editor_ = False
383 editor_ = False
383 if editor:
384 if editor:
384 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
385 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
385 **pycompat.strkwargs(opts))
386 **pycompat.strkwargs(opts))
386 with repo.ui.configoverride(overrides):
387 with repo.ui.configoverride(overrides):
387 return repo.commit(message, shelveuser, opts.get('date'),
388 return repo.commit(message, shelveuser, opts.get('date'),
388 match, editor=editor_, extra=extra)
389 match, editor=editor_, extra=extra)
389 finally:
390 finally:
390 if hasmq:
391 if hasmq:
391 repo.mq.checkapplied = saved
392 repo.mq.checkapplied = saved
392
393
393 def interactivecommitfunc(ui, repo, *pats, **opts):
394 def interactivecommitfunc(ui, repo, *pats, **opts):
394 opts = pycompat.byteskwargs(opts)
395 opts = pycompat.byteskwargs(opts)
395 match = scmutil.match(repo['.'], pats, {})
396 match = scmutil.match(repo['.'], pats, {})
396 message = opts['message']
397 message = opts['message']
397 return commitfunc(ui, repo, message, match, opts)
398 return commitfunc(ui, repo, message, match, opts)
398
399
399 return interactivecommitfunc if interactive else commitfunc
400 return interactivecommitfunc if interactive else commitfunc
400
401
401 def _nothingtoshelvemessaging(ui, repo, pats, opts):
402 def _nothingtoshelvemessaging(ui, repo, pats, opts):
402 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
403 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
403 if stat.deleted:
404 if stat.deleted:
404 ui.status(_("nothing changed (%d missing files, see "
405 ui.status(_("nothing changed (%d missing files, see "
405 "'hg status')\n") % len(stat.deleted))
406 "'hg status')\n") % len(stat.deleted))
406 else:
407 else:
407 ui.status(_("nothing changed\n"))
408 ui.status(_("nothing changed\n"))
408
409
409 def _shelvecreatedcommit(repo, node, name):
410 def _shelvecreatedcommit(repo, node, name):
410 bases = list(mutableancestors(repo[node]))
411 bases = list(mutableancestors(repo[node]))
411 shelvedfile(repo, name, 'hg').writebundle(bases, node)
412 shelvedfile(repo, name, 'hg').writebundle(bases, node)
412 cmdutil.export(repo, [node],
413 cmdutil.export(repo, [node],
413 fp=shelvedfile(repo, name, patchextension).opener('wb'),
414 fp=shelvedfile(repo, name, patchextension).opener('wb'),
414 opts=mdiff.diffopts(git=True))
415 opts=mdiff.diffopts(git=True))
415
416
416 def _includeunknownfiles(repo, pats, opts, extra):
417 def _includeunknownfiles(repo, pats, opts, extra):
417 s = repo.status(match=scmutil.match(repo[None], pats, opts),
418 s = repo.status(match=scmutil.match(repo[None], pats, opts),
418 unknown=True)
419 unknown=True)
419 if s.unknown:
420 if s.unknown:
420 extra['shelve_unknown'] = '\0'.join(s.unknown)
421 extra['shelve_unknown'] = '\0'.join(s.unknown)
421 repo[None].add(s.unknown)
422 repo[None].add(s.unknown)
422
423
423 def _finishshelve(repo):
424 def _finishshelve(repo):
424 _aborttransaction(repo)
425 _aborttransaction(repo)
425
426
426 def _docreatecmd(ui, repo, pats, opts):
427 def _docreatecmd(ui, repo, pats, opts):
427 wctx = repo[None]
428 wctx = repo[None]
428 parents = wctx.parents()
429 parents = wctx.parents()
429 if len(parents) > 1:
430 if len(parents) > 1:
430 raise error.Abort(_('cannot shelve while merging'))
431 raise error.Abort(_('cannot shelve while merging'))
431 parent = parents[0]
432 parent = parents[0]
432 origbranch = wctx.branch()
433 origbranch = wctx.branch()
433
434
434 if parent.node() != nodemod.nullid:
435 if parent.node() != nodemod.nullid:
435 desc = "changes to: %s" % parent.description().split('\n', 1)[0]
436 desc = "changes to: %s" % parent.description().split('\n', 1)[0]
436 else:
437 else:
437 desc = '(changes in empty repository)'
438 desc = '(changes in empty repository)'
438
439
439 if not opts.get('message'):
440 if not opts.get('message'):
440 opts['message'] = desc
441 opts['message'] = desc
441
442
442 lock = tr = activebookmark = None
443 lock = tr = activebookmark = None
443 try:
444 try:
444 lock = repo.lock()
445 lock = repo.lock()
445
446
446 # use an uncommitted transaction to generate the bundle to avoid
447 # use an uncommitted transaction to generate the bundle to avoid
447 # pull races. ensure we don't print the abort message to stderr.
448 # pull races. ensure we don't print the abort message to stderr.
448 tr = repo.transaction('commit', report=lambda x: None)
449 tr = repo.transaction('commit', report=lambda x: None)
449
450
450 interactive = opts.get('interactive', False)
451 interactive = opts.get('interactive', False)
451 includeunknown = (opts.get('unknown', False) and
452 includeunknown = (opts.get('unknown', False) and
452 not opts.get('addremove', False))
453 not opts.get('addremove', False))
453
454
454 name = getshelvename(repo, parent, opts)
455 name = getshelvename(repo, parent, opts)
455 activebookmark = _backupactivebookmark(repo)
456 activebookmark = _backupactivebookmark(repo)
456 extra = {}
457 extra = {}
457 if includeunknown:
458 if includeunknown:
458 _includeunknownfiles(repo, pats, opts, extra)
459 _includeunknownfiles(repo, pats, opts, extra)
459
460
460 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
461 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
461 # In a non-bare shelve we don't store the newly created branch
462 # In a non-bare shelve we don't store the newly created branch
462 # at the bundled commit
463 # at the bundled commit
463 repo.dirstate.setbranch(repo['.'].branch())
464 repo.dirstate.setbranch(repo['.'].branch())
464
465
465 commitfunc = getcommitfunc(extra, interactive, editor=True)
466 commitfunc = getcommitfunc(extra, interactive, editor=True)
466 if not interactive:
467 if not interactive:
467 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
468 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
468 else:
469 else:
469 node = cmdutil.dorecord(ui, repo, commitfunc, None,
470 node = cmdutil.dorecord(ui, repo, commitfunc, None,
470 False, cmdutil.recordfilter, *pats,
471 False, cmdutil.recordfilter, *pats,
471 **pycompat.strkwargs(opts))
472 **pycompat.strkwargs(opts))
472 if not node:
473 if not node:
473 _nothingtoshelvemessaging(ui, repo, pats, opts)
474 _nothingtoshelvemessaging(ui, repo, pats, opts)
474 return 1
475 return 1
475
476
476 _shelvecreatedcommit(repo, node, name)
477 _shelvecreatedcommit(repo, node, name)
477
478
478 if ui.formatted():
479 if ui.formatted():
479 desc = util.ellipsis(desc, ui.termwidth())
480 desc = util.ellipsis(desc, ui.termwidth())
480 ui.status(_('shelved as %s\n') % name)
481 ui.status(_('shelved as %s\n') % name)
481 hg.update(repo, parent.node())
482 hg.update(repo, parent.node())
482 if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
483 if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
483 repo.dirstate.setbranch(origbranch)
484 repo.dirstate.setbranch(origbranch)
484
485
485 _finishshelve(repo)
486 _finishshelve(repo)
486 finally:
487 finally:
487 _restoreactivebookmark(repo, activebookmark)
488 _restoreactivebookmark(repo, activebookmark)
488 lockmod.release(tr, lock)
489 lockmod.release(tr, lock)
489
490
490 def _isbareshelve(pats, opts):
491 def _isbareshelve(pats, opts):
491 return (not pats
492 return (not pats
492 and not opts.get('interactive', False)
493 and not opts.get('interactive', False)
493 and not opts.get('include', False)
494 and not opts.get('include', False)
494 and not opts.get('exclude', False))
495 and not opts.get('exclude', False))
495
496
496 def _iswctxonnewbranch(repo):
497 def _iswctxonnewbranch(repo):
497 return repo[None].branch() != repo['.'].branch()
498 return repo[None].branch() != repo['.'].branch()
498
499
499 def cleanupcmd(ui, repo):
500 def cleanupcmd(ui, repo):
500 """subcommand that deletes all shelves"""
501 """subcommand that deletes all shelves"""
501
502
502 with repo.wlock():
503 with repo.wlock():
503 for (name, _type) in repo.vfs.readdir(shelvedir):
504 for (name, _type) in repo.vfs.readdir(shelvedir):
504 suffix = name.rsplit('.', 1)[-1]
505 suffix = name.rsplit('.', 1)[-1]
505 if suffix in shelvefileextensions:
506 if suffix in shelvefileextensions:
506 shelvedfile(repo, name).movetobackup()
507 shelvedfile(repo, name).movetobackup()
507 cleanupoldbackups(repo)
508 cleanupoldbackups(repo)
508
509
509 def deletecmd(ui, repo, pats):
510 def deletecmd(ui, repo, pats):
510 """subcommand that deletes a specific shelve"""
511 """subcommand that deletes a specific shelve"""
511 if not pats:
512 if not pats:
512 raise error.Abort(_('no shelved changes specified!'))
513 raise error.Abort(_('no shelved changes specified!'))
513 with repo.wlock():
514 with repo.wlock():
514 try:
515 try:
515 for name in pats:
516 for name in pats:
516 for suffix in shelvefileextensions:
517 for suffix in shelvefileextensions:
517 shfile = shelvedfile(repo, name, suffix)
518 shfile = shelvedfile(repo, name, suffix)
518 # the patch file is necessary, as it should
519 # the patch file is necessary, as it should
519 # be present for any kind of shelve,
520 # be present for any kind of shelve,
520 # but the .hg file is optional as in the future we
521 # but the .hg file is optional as in the future we
521 # will add an obsolete shelve which does not create a
522 # will add an obsolete shelve which does not create a
522 # bundle
523 # bundle
523 if shfile.exists() or suffix == patchextension:
524 if shfile.exists() or suffix == patchextension:
524 shfile.movetobackup()
525 shfile.movetobackup()
525 cleanupoldbackups(repo)
526 cleanupoldbackups(repo)
526 except OSError as err:
527 except OSError as err:
527 if err.errno != errno.ENOENT:
528 if err.errno != errno.ENOENT:
528 raise
529 raise
529 raise error.Abort(_("shelved change '%s' not found") % name)
530 raise error.Abort(_("shelved change '%s' not found") % name)
530
531
531 def listshelves(repo):
532 def listshelves(repo):
532 """return all shelves in repo as list of (time, filename)"""
533 """return all shelves in repo as list of (time, filename)"""
533 try:
534 try:
534 names = repo.vfs.readdir(shelvedir)
535 names = repo.vfs.readdir(shelvedir)
535 except OSError as err:
536 except OSError as err:
536 if err.errno != errno.ENOENT:
537 if err.errno != errno.ENOENT:
537 raise
538 raise
538 return []
539 return []
539 info = []
540 info = []
540 for (name, _type) in names:
541 for (name, _type) in names:
541 pfx, sfx = name.rsplit('.', 1)
542 pfx, sfx = name.rsplit('.', 1)
542 if not pfx or sfx != patchextension:
543 if not pfx or sfx != patchextension:
543 continue
544 continue
544 st = shelvedfile(repo, name).stat()
545 st = shelvedfile(repo, name).stat()
545 info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
546 info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename()))
546 return sorted(info, reverse=True)
547 return sorted(info, reverse=True)
547
548
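The swap from ``st.st_mtime`` to ``st[stat.ST_MTIME]`` above matters because the attribute form can be a float on platforms with sub-second timestamp resolution, while the indexed form is always an integer, giving deterministic sort keys. A minimal sketch of the difference (the path is illustrative only)::

    import os
    import stat

    st = os.stat('.')
    print(st.st_mtime)        # float, e.g. 1520107200.123456
    print(st[stat.ST_MTIME])  # int, e.g. 1520107200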
548 def listcmd(ui, repo, pats, opts):
549 def listcmd(ui, repo, pats, opts):
549 """subcommand that displays the list of shelves"""
550 """subcommand that displays the list of shelves"""
550 pats = set(pats)
551 pats = set(pats)
551 width = 80
552 width = 80
552 if not ui.plain():
553 if not ui.plain():
553 width = ui.termwidth()
554 width = ui.termwidth()
554 namelabel = 'shelve.newest'
555 namelabel = 'shelve.newest'
555 ui.pager('shelve')
556 ui.pager('shelve')
556 for mtime, name in listshelves(repo):
557 for mtime, name in listshelves(repo):
557 sname = util.split(name)[1]
558 sname = util.split(name)[1]
558 if pats and sname not in pats:
559 if pats and sname not in pats:
559 continue
560 continue
560 ui.write(sname, label=namelabel)
561 ui.write(sname, label=namelabel)
561 namelabel = 'shelve.name'
562 namelabel = 'shelve.name'
562 if ui.quiet:
563 if ui.quiet:
563 ui.write('\n')
564 ui.write('\n')
564 continue
565 continue
565 ui.write(' ' * (16 - len(sname)))
566 ui.write(' ' * (16 - len(sname)))
566 used = 16
567 used = 16
567 date = dateutil.makedate(mtime)
568 date = dateutil.makedate(mtime)
568 age = '(%s)' % templatefilters.age(date, abbrev=True)
569 age = '(%s)' % templatefilters.age(date, abbrev=True)
569 ui.write(age, label='shelve.age')
570 ui.write(age, label='shelve.age')
570 ui.write(' ' * (12 - len(age)))
571 ui.write(' ' * (12 - len(age)))
571 used += 12
572 used += 12
572 with open(name + '.' + patchextension, 'rb') as fp:
573 with open(name + '.' + patchextension, 'rb') as fp:
573 while True:
574 while True:
574 line = fp.readline()
575 line = fp.readline()
575 if not line:
576 if not line:
576 break
577 break
577 if not line.startswith('#'):
578 if not line.startswith('#'):
578 desc = line.rstrip()
579 desc = line.rstrip()
579 if ui.formatted():
580 if ui.formatted():
580 desc = util.ellipsis(desc, width - used)
581 desc = util.ellipsis(desc, width - used)
581 ui.write(desc)
582 ui.write(desc)
582 break
583 break
583 ui.write('\n')
584 ui.write('\n')
584 if not (opts['patch'] or opts['stat']):
585 if not (opts['patch'] or opts['stat']):
585 continue
586 continue
586 difflines = fp.readlines()
587 difflines = fp.readlines()
587 if opts['patch']:
588 if opts['patch']:
588 for chunk, label in patch.difflabel(iter, difflines):
589 for chunk, label in patch.difflabel(iter, difflines):
589 ui.write(chunk, label=label)
590 ui.write(chunk, label=label)
590 if opts['stat']:
591 if opts['stat']:
591 for chunk, label in patch.diffstatui(difflines, width=width):
592 for chunk, label in patch.diffstatui(difflines, width=width):
592 ui.write(chunk, label=label)
593 ui.write(chunk, label=label)
593
594
594 def patchcmds(ui, repo, pats, opts, subcommand):
595 def patchcmds(ui, repo, pats, opts, subcommand):
595 """subcommand that displays shelves"""
596 """subcommand that displays shelves"""
596 if len(pats) == 0:
597 if len(pats) == 0:
597 raise error.Abort(_("--%s expects at least one shelf") % subcommand)
598 raise error.Abort(_("--%s expects at least one shelf") % subcommand)
598
599
599 for shelfname in pats:
600 for shelfname in pats:
600 if not shelvedfile(repo, shelfname, patchextension).exists():
601 if not shelvedfile(repo, shelfname, patchextension).exists():
601 raise error.Abort(_("cannot find shelf %s") % shelfname)
602 raise error.Abort(_("cannot find shelf %s") % shelfname)
602
603
603 listcmd(ui, repo, pats, opts)
604 listcmd(ui, repo, pats, opts)
604
605
605 def checkparents(repo, state):
606 def checkparents(repo, state):
606 """check parent while resuming an unshelve"""
607 """check parent while resuming an unshelve"""
607 if state.parents != repo.dirstate.parents():
608 if state.parents != repo.dirstate.parents():
608 raise error.Abort(_('working directory parents do not match unshelve '
609 raise error.Abort(_('working directory parents do not match unshelve '
609 'state'))
610 'state'))
610
611
611 def pathtofiles(repo, files):
612 def pathtofiles(repo, files):
612 cwd = repo.getcwd()
613 cwd = repo.getcwd()
613 return [repo.pathto(f, cwd) for f in files]
614 return [repo.pathto(f, cwd) for f in files]
614
615
615 def unshelveabort(ui, repo, state, opts):
616 def unshelveabort(ui, repo, state, opts):
616 """subcommand that abort an in-progress unshelve"""
617 """subcommand that abort an in-progress unshelve"""
617 with repo.lock():
618 with repo.lock():
618 try:
619 try:
619 checkparents(repo, state)
620 checkparents(repo, state)
620
621
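# restore the rebase state file that the interrupted unshelve stashed away
# (see _rebaserestoredcommit), so the rebase machinery can find the
# in-progress rebase and abort it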
621 repo.vfs.rename('unshelverebasestate', 'rebasestate')
622 repo.vfs.rename('unshelverebasestate', 'rebasestate')
622 try:
623 try:
623 rebase.rebase(ui, repo, **{
624 rebase.rebase(ui, repo, **{
624 r'abort' : True
625 r'abort' : True
625 })
626 })
626 except Exception:
627 except Exception:
627 repo.vfs.rename('rebasestate', 'unshelverebasestate')
628 repo.vfs.rename('rebasestate', 'unshelverebasestate')
628 raise
629 raise
629
630
630 mergefiles(ui, repo, state.wctx, state.pendingctx)
631 mergefiles(ui, repo, state.wctx, state.pendingctx)
631 repair.strip(ui, repo, state.nodestoremove, backup=False,
632 repair.strip(ui, repo, state.nodestoremove, backup=False,
632 topic='shelve')
633 topic='shelve')
633 finally:
634 finally:
634 shelvedstate.clear(repo)
635 shelvedstate.clear(repo)
635 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
636 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
636
637
637 def mergefiles(ui, repo, wctx, shelvectx):
638 def mergefiles(ui, repo, wctx, shelvectx):
638 """updates to wctx and merges the changes from shelvectx into the
639 """updates to wctx and merges the changes from shelvectx into the
639 dirstate."""
640 dirstate."""
640 with ui.configoverride({('ui', 'quiet'): True}):
641 with ui.configoverride({('ui', 'quiet'): True}):
641 hg.update(repo, wctx.node())
642 hg.update(repo, wctx.node())
642 files = []
643 files = []
643 files.extend(shelvectx.files())
644 files.extend(shelvectx.files())
644 files.extend(shelvectx.parents()[0].files())
645 files.extend(shelvectx.parents()[0].files())
645
646
646 # revert will overwrite unknown files, so move them out of the way
647 # revert will overwrite unknown files, so move them out of the way
647 for file in repo.status(unknown=True).unknown:
648 for file in repo.status(unknown=True).unknown:
648 if file in files:
649 if file in files:
649 util.rename(file, scmutil.origpath(ui, repo, file))
650 util.rename(file, scmutil.origpath(ui, repo, file))
650 ui.pushbuffer(True)
651 ui.pushbuffer(True)
651 cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
652 cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
652 *pathtofiles(repo, files),
653 *pathtofiles(repo, files),
653 **{r'no_backup': True})
654 **{r'no_backup': True})
654 ui.popbuffer()
655 ui.popbuffer()
655
656
656 def restorebranch(ui, repo, branchtorestore):
657 def restorebranch(ui, repo, branchtorestore):
657 if branchtorestore and branchtorestore != repo.dirstate.branch():
658 if branchtorestore and branchtorestore != repo.dirstate.branch():
658 repo.dirstate.setbranch(branchtorestore)
659 repo.dirstate.setbranch(branchtorestore)
659 ui.status(_('marked working directory as branch %s\n')
660 ui.status(_('marked working directory as branch %s\n')
660 % branchtorestore)
661 % branchtorestore)
661
662
662 def unshelvecleanup(ui, repo, name, opts):
663 def unshelvecleanup(ui, repo, name, opts):
663 """remove related files after an unshelve"""
664 """remove related files after an unshelve"""
664 if not opts.get('keep'):
665 if not opts.get('keep'):
665 for filetype in shelvefileextensions:
666 for filetype in shelvefileextensions:
666 shfile = shelvedfile(repo, name, filetype)
667 shfile = shelvedfile(repo, name, filetype)
667 if shfile.exists():
668 if shfile.exists():
668 shfile.movetobackup()
669 shfile.movetobackup()
669 cleanupoldbackups(repo)
670 cleanupoldbackups(repo)
670
671
671 def unshelvecontinue(ui, repo, state, opts):
672 def unshelvecontinue(ui, repo, state, opts):
672 """subcommand to continue an in-progress unshelve"""
673 """subcommand to continue an in-progress unshelve"""
673 # We're finishing off a merge. First parent is our original
674 # We're finishing off a merge. First parent is our original
674 # parent, second is the temporary "fake" commit we're unshelving.
675 # parent, second is the temporary "fake" commit we're unshelving.
675 with repo.lock():
676 with repo.lock():
676 checkparents(repo, state)
677 checkparents(repo, state)
677 ms = merge.mergestate.read(repo)
678 ms = merge.mergestate.read(repo)
678 if list(ms.unresolved()):
679 if list(ms.unresolved()):
679 raise error.Abort(
680 raise error.Abort(
680 _("unresolved conflicts, can't continue"),
681 _("unresolved conflicts, can't continue"),
681 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
682 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
682
683
683 repo.vfs.rename('unshelverebasestate', 'rebasestate')
684 repo.vfs.rename('unshelverebasestate', 'rebasestate')
684 try:
685 try:
685 rebase.rebase(ui, repo, **{
686 rebase.rebase(ui, repo, **{
686 r'continue' : True
687 r'continue' : True
687 })
688 })
688 except Exception:
689 except Exception:
689 repo.vfs.rename('rebasestate', 'unshelverebasestate')
690 repo.vfs.rename('rebasestate', 'unshelverebasestate')
690 raise
691 raise
691
692
692 shelvectx = repo['tip']
693 shelvectx = repo['tip']
693 if state.pendingctx not in shelvectx.parents():
694 if state.pendingctx not in shelvectx.parents():
694 # rebase was a no-op, so it produced no child commit
695 # rebase was a no-op, so it produced no child commit
695 shelvectx = state.pendingctx
696 shelvectx = state.pendingctx
696 else:
697 else:
697 # only strip the shelvectx if the rebase produced it
698 # only strip the shelvectx if the rebase produced it
698 state.nodestoremove.append(shelvectx.node())
699 state.nodestoremove.append(shelvectx.node())
699
700
700 mergefiles(ui, repo, state.wctx, shelvectx)
701 mergefiles(ui, repo, state.wctx, shelvectx)
701 restorebranch(ui, repo, state.branchtorestore)
702 restorebranch(ui, repo, state.branchtorestore)
702
703
703 repair.strip(ui, repo, state.nodestoremove, backup=False,
704 repair.strip(ui, repo, state.nodestoremove, backup=False,
704 topic='shelve')
705 topic='shelve')
705 _restoreactivebookmark(repo, state.activebookmark)
706 _restoreactivebookmark(repo, state.activebookmark)
706 shelvedstate.clear(repo)
707 shelvedstate.clear(repo)
707 unshelvecleanup(ui, repo, state.name, opts)
708 unshelvecleanup(ui, repo, state.name, opts)
708 ui.status(_("unshelve of '%s' complete\n") % state.name)
709 ui.status(_("unshelve of '%s' complete\n") % state.name)
709
710
710 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
711 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
711 """Temporarily commit working copy changes before moving unshelve commit"""
712 """Temporarily commit working copy changes before moving unshelve commit"""
712 # Store pending changes in a commit and remember added in case a shelve
713 # Store pending changes in a commit and remember added in case a shelve
713 # contains unknown files that are part of the pending change
714 # contains unknown files that are part of the pending change
714 s = repo.status()
715 s = repo.status()
715 addedbefore = frozenset(s.added)
716 addedbefore = frozenset(s.added)
716 if not (s.modified or s.added or s.removed):
717 if not (s.modified or s.added or s.removed):
717 return tmpwctx, addedbefore
718 return tmpwctx, addedbefore
718 ui.status(_("temporarily committing pending changes "
719 ui.status(_("temporarily committing pending changes "
719 "(restore with 'hg unshelve --abort')\n"))
720 "(restore with 'hg unshelve --abort')\n"))
720 commitfunc = getcommitfunc(extra=None, interactive=False,
721 commitfunc = getcommitfunc(extra=None, interactive=False,
721 editor=False)
722 editor=False)
722 tempopts = {}
723 tempopts = {}
723 tempopts['message'] = "pending changes temporary commit"
724 tempopts['message'] = "pending changes temporary commit"
724 tempopts['date'] = opts.get('date')
725 tempopts['date'] = opts.get('date')
725 with ui.configoverride({('ui', 'quiet'): True}):
726 with ui.configoverride({('ui', 'quiet'): True}):
726 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
727 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
727 tmpwctx = repo[node]
728 tmpwctx = repo[node]
728 return tmpwctx, addedbefore
729 return tmpwctx, addedbefore
729
730
730 def _unshelverestorecommit(ui, repo, basename):
731 def _unshelverestorecommit(ui, repo, basename):
731 """Recreate commit in the repository during the unshelve"""
732 """Recreate commit in the repository during the unshelve"""
732 with ui.configoverride({('ui', 'quiet'): True}):
733 with ui.configoverride({('ui', 'quiet'): True}):
733 shelvedfile(repo, basename, 'hg').applybundle()
734 shelvedfile(repo, basename, 'hg').applybundle()
734 shelvectx = repo['tip']
735 shelvectx = repo['tip']
735 return repo, shelvectx
736 return repo, shelvectx
736
737
737 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
738 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
738 tmpwctx, shelvectx, branchtorestore,
739 tmpwctx, shelvectx, branchtorestore,
739 activebookmark):
740 activebookmark):
740 """Rebase restored commit from its original location to a destination"""
741 """Rebase restored commit from its original location to a destination"""
741 # If the shelve is not immediately on top of the commit
742 # If the shelve is not immediately on top of the commit
742 # we'll be merging with, rebase it to be on top.
743 # we'll be merging with, rebase it to be on top.
743 if tmpwctx.node() == shelvectx.parents()[0].node():
744 if tmpwctx.node() == shelvectx.parents()[0].node():
744 return shelvectx
745 return shelvectx
745
746
746 ui.status(_('rebasing shelved changes\n'))
747 ui.status(_('rebasing shelved changes\n'))
747 try:
748 try:
748 rebase.rebase(ui, repo, **{
749 rebase.rebase(ui, repo, **{
749 r'rev': [shelvectx.rev()],
750 r'rev': [shelvectx.rev()],
750 r'dest': "%d" % tmpwctx.rev(),
751 r'dest': "%d" % tmpwctx.rev(),
751 r'keep': True,
752 r'keep': True,
752 r'tool': opts.get('tool', ''),
753 r'tool': opts.get('tool', ''),
753 })
754 })
754 except error.InterventionRequired:
755 except error.InterventionRequired:
755 tr.close()
756 tr.close()
756
757
757 nodestoremove = [repo.changelog.node(rev)
758 nodestoremove = [repo.changelog.node(rev)
758 for rev in xrange(oldtiprev, len(repo))]
759 for rev in xrange(oldtiprev, len(repo))]
759 shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
760 shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
760 branchtorestore, opts.get('keep'), activebookmark)
761 branchtorestore, opts.get('keep'), activebookmark)
761
762
762 repo.vfs.rename('rebasestate', 'unshelverebasestate')
763 repo.vfs.rename('rebasestate', 'unshelverebasestate')
763 raise error.InterventionRequired(
764 raise error.InterventionRequired(
764 _("unresolved conflicts (see 'hg resolve', then "
765 _("unresolved conflicts (see 'hg resolve', then "
765 "'hg unshelve --continue')"))
766 "'hg unshelve --continue')"))
766
767
767 # refresh ctx after rebase completes
768 # refresh ctx after rebase completes
768 shelvectx = repo['tip']
769 shelvectx = repo['tip']
769
770
770 if tmpwctx not in shelvectx.parents():
771 if tmpwctx not in shelvectx.parents():
771 # rebase was a no-op, so it produced no child commit
772 # rebase was a no-op, so it produced no child commit
772 shelvectx = tmpwctx
773 shelvectx = tmpwctx
773 return shelvectx
774 return shelvectx
774
775
775 def _forgetunknownfiles(repo, shelvectx, addedbefore):
776 def _forgetunknownfiles(repo, shelvectx, addedbefore):
776 # Forget any files that were unknown before the shelve, still unknown
777 # Forget any files that were unknown before the shelve, still unknown
777 # before the unshelve started, but are now added.
778 # before the unshelve started, but are now added.
778 shelveunknown = shelvectx.extra().get('shelve_unknown')
779 shelveunknown = shelvectx.extra().get('shelve_unknown')
779 if not shelveunknown:
780 if not shelveunknown:
780 return
781 return
781 shelveunknown = frozenset(shelveunknown.split('\0'))
782 shelveunknown = frozenset(shelveunknown.split('\0'))
782 addedafter = frozenset(repo.status().added)
783 addedafter = frozenset(repo.status().added)
783 toforget = (addedafter & shelveunknown) - addedbefore
784 toforget = (addedafter & shelveunknown) - addedbefore
784 repo[None].forget(toforget)
785 repo[None].forget(toforget)
785
786
786 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
787 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
787 _restoreactivebookmark(repo, activebookmark)
788 _restoreactivebookmark(repo, activebookmark)
788 # The transaction aborting will strip all the commits for us,
789 # The transaction aborting will strip all the commits for us,
789 # but it doesn't update the inmemory structures, so addchangegroup
790 # but it doesn't update the inmemory structures, so addchangegroup
790 # hooks still fire and try to operate on the missing commits.
791 # hooks still fire and try to operate on the missing commits.
791 # Clean up manually to prevent this.
792 # Clean up manually to prevent this.
792 repo.unfiltered().changelog.strip(oldtiprev, tr)
793 repo.unfiltered().changelog.strip(oldtiprev, tr)
793 _aborttransaction(repo)
794 _aborttransaction(repo)
794
795
795 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
796 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
796 """Check potential problems which may result from working
797 """Check potential problems which may result from working
797 copy having untracked changes."""
798 copy having untracked changes."""
798 wcdeleted = set(repo.status().deleted)
799 wcdeleted = set(repo.status().deleted)
799 shelvetouched = set(shelvectx.files())
800 shelvetouched = set(shelvectx.files())
800 intersection = wcdeleted.intersection(shelvetouched)
801 intersection = wcdeleted.intersection(shelvetouched)
801 if intersection:
802 if intersection:
802 m = _("shelved change touches missing files")
803 m = _("shelved change touches missing files")
803 hint = _("run hg status to see which files are missing")
804 hint = _("run hg status to see which files are missing")
804 raise error.Abort(m, hint=hint)
805 raise error.Abort(m, hint=hint)
805
806
806 @command('unshelve',
807 @command('unshelve',
807 [('a', 'abort', None,
808 [('a', 'abort', None,
808 _('abort an incomplete unshelve operation')),
809 _('abort an incomplete unshelve operation')),
809 ('c', 'continue', None,
810 ('c', 'continue', None,
810 _('continue an incomplete unshelve operation')),
811 _('continue an incomplete unshelve operation')),
811 ('k', 'keep', None,
812 ('k', 'keep', None,
812 _('keep shelve after unshelving')),
813 _('keep shelve after unshelving')),
813 ('n', 'name', '',
814 ('n', 'name', '',
814 _('restore shelved change with given name'), _('NAME')),
815 _('restore shelved change with given name'), _('NAME')),
815 ('t', 'tool', '', _('specify merge tool')),
816 ('t', 'tool', '', _('specify merge tool')),
816 ('', 'date', '',
817 ('', 'date', '',
817 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
818 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
818 _('hg unshelve [[-n] SHELVED]'))
819 _('hg unshelve [[-n] SHELVED]'))
819 def unshelve(ui, repo, *shelved, **opts):
820 def unshelve(ui, repo, *shelved, **opts):
820 """restore a shelved change to the working directory
821 """restore a shelved change to the working directory
821
822
822 This command accepts an optional name of a shelved change to
823 This command accepts an optional name of a shelved change to
823 restore. If none is given, the most recent shelved change is used.
824 restore. If none is given, the most recent shelved change is used.
824
825
825 If a shelved change is applied successfully, the bundle that
826 If a shelved change is applied successfully, the bundle that
826 contains the shelved changes is moved to a backup location
827 contains the shelved changes is moved to a backup location
827 (.hg/shelve-backup).
828 (.hg/shelve-backup).
828
829
829 Since you can restore a shelved change on top of an arbitrary
830 Since you can restore a shelved change on top of an arbitrary
830 commit, it is possible that unshelving will result in a conflict
831 commit, it is possible that unshelving will result in a conflict
831 between your changes and the commits you are unshelving onto. If
832 between your changes and the commits you are unshelving onto. If
832 this occurs, you must resolve the conflict, then use
833 this occurs, you must resolve the conflict, then use
833 ``--continue`` to complete the unshelve operation. (The bundle
834 ``--continue`` to complete the unshelve operation. (The bundle
834 will not be moved until you successfully complete the unshelve.)
835 will not be moved until you successfully complete the unshelve.)
835
836
836 (Alternatively, you can use ``--abort`` to abandon an unshelve
837 (Alternatively, you can use ``--abort`` to abandon an unshelve
837 that causes a conflict. This reverts the unshelved changes, and
838 that causes a conflict. This reverts the unshelved changes, and
838 leaves the bundle in place.)
839 leaves the bundle in place.)
839
840
840 If a bare shelved change (when no files are specified, without the
841 If a bare shelved change (when no files are specified, without the
841 interactive, include and exclude options) was done on a newly created
842 interactive, include and exclude options) was done on a newly created
842 branch, unshelving restores branch information to the working directory.
843 branch, unshelving restores branch information to the working directory.
843
844
844 After a successful unshelve, the shelved changes are stored in a
845 After a successful unshelve, the shelved changes are stored in a
845 backup directory. Only the N most recent backups are kept. N
846 backup directory. Only the N most recent backups are kept. N
846 defaults to 10 but can be overridden using the ``shelve.maxbackups``
847 defaults to 10 but can be overridden using the ``shelve.maxbackups``
847 configuration option.
848 configuration option.
848
849
849 .. container:: verbose
850 .. container:: verbose
850
851
851 The timestamp in seconds is used to decide the order of backups. More
852 The timestamp in seconds is used to decide the order of backups. More
852 than ``maxbackups`` backups are kept if identical timestamps
853 than ``maxbackups`` backups are kept if identical timestamps
853 prevent deciding their exact order, for safety.
854 prevent deciding their exact order, for safety.
854 """
855 """
855 with repo.wlock():
856 with repo.wlock():
856 return _dounshelve(ui, repo, *shelved, **opts)
857 return _dounshelve(ui, repo, *shelved, **opts)
857
858
858 def _dounshelve(ui, repo, *shelved, **opts):
859 def _dounshelve(ui, repo, *shelved, **opts):
859 opts = pycompat.byteskwargs(opts)
860 opts = pycompat.byteskwargs(opts)
860 abortf = opts.get('abort')
861 abortf = opts.get('abort')
861 continuef = opts.get('continue')
862 continuef = opts.get('continue')
862 if not abortf and not continuef:
863 if not abortf and not continuef:
863 cmdutil.checkunfinished(repo)
864 cmdutil.checkunfinished(repo)
864 shelved = list(shelved)
865 shelved = list(shelved)
865 if opts.get("name"):
866 if opts.get("name"):
866 shelved.append(opts["name"])
867 shelved.append(opts["name"])
867
868
868 if abortf or continuef:
869 if abortf or continuef:
869 if abortf and continuef:
870 if abortf and continuef:
870 raise error.Abort(_('cannot use both abort and continue'))
871 raise error.Abort(_('cannot use both abort and continue'))
871 if shelved:
872 if shelved:
872 raise error.Abort(_('cannot combine abort/continue with '
873 raise error.Abort(_('cannot combine abort/continue with '
873 'naming a shelved change'))
874 'naming a shelved change'))
874 if abortf and opts.get('tool', False):
875 if abortf and opts.get('tool', False):
875 ui.warn(_('tool option will be ignored\n'))
876 ui.warn(_('tool option will be ignored\n'))
876
877
877 try:
878 try:
878 state = shelvedstate.load(repo)
879 state = shelvedstate.load(repo)
879 if opts.get('keep') is None:
880 if opts.get('keep') is None:
880 opts['keep'] = state.keep
881 opts['keep'] = state.keep
881 except IOError as err:
882 except IOError as err:
882 if err.errno != errno.ENOENT:
883 if err.errno != errno.ENOENT:
883 raise
884 raise
884 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
885 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
885 except error.CorruptedState as err:
886 except error.CorruptedState as err:
886 ui.debug(pycompat.bytestr(err) + '\n')
887 ui.debug(pycompat.bytestr(err) + '\n')
887 if continuef:
888 if continuef:
888 msg = _('corrupted shelved state file')
889 msg = _('corrupted shelved state file')
889 hint = _('please run hg unshelve --abort to abort unshelve '
890 hint = _('please run hg unshelve --abort to abort unshelve '
890 'operation')
891 'operation')
891 raise error.Abort(msg, hint=hint)
892 raise error.Abort(msg, hint=hint)
892 elif abortf:
893 elif abortf:
893 msg = _('could not read shelved state file, your working copy '
894 msg = _('could not read shelved state file, your working copy '
894 'may be in an unexpected state\nplease update to some '
895 'may be in an unexpected state\nplease update to some '
895 'commit\n')
896 'commit\n')
896 ui.warn(msg)
897 ui.warn(msg)
897 shelvedstate.clear(repo)
898 shelvedstate.clear(repo)
898 return
899 return
899
900
900 if abortf:
901 if abortf:
901 return unshelveabort(ui, repo, state, opts)
902 return unshelveabort(ui, repo, state, opts)
902 elif continuef:
903 elif continuef:
903 return unshelvecontinue(ui, repo, state, opts)
904 return unshelvecontinue(ui, repo, state, opts)
904 elif len(shelved) > 1:
905 elif len(shelved) > 1:
905 raise error.Abort(_('can only unshelve one change at a time'))
906 raise error.Abort(_('can only unshelve one change at a time'))
906 elif not shelved:
907 elif not shelved:
907 shelved = listshelves(repo)
908 shelved = listshelves(repo)
908 if not shelved:
909 if not shelved:
909 raise error.Abort(_('no shelved changes to apply!'))
910 raise error.Abort(_('no shelved changes to apply!'))
910 basename = util.split(shelved[0][1])[1]
911 basename = util.split(shelved[0][1])[1]
911 ui.status(_("unshelving change '%s'\n") % basename)
912 ui.status(_("unshelving change '%s'\n") % basename)
912 else:
913 else:
913 basename = shelved[0]
914 basename = shelved[0]
914
915
915 if not shelvedfile(repo, basename, patchextension).exists():
916 if not shelvedfile(repo, basename, patchextension).exists():
916 raise error.Abort(_("shelved change '%s' not found") % basename)
917 raise error.Abort(_("shelved change '%s' not found") % basename)
917
918
918 lock = tr = None
919 lock = tr = None
919 try:
920 try:
920 lock = repo.lock()
921 lock = repo.lock()
921 tr = repo.transaction('unshelve', report=lambda x: None)
922 tr = repo.transaction('unshelve', report=lambda x: None)
922 oldtiprev = len(repo)
923 oldtiprev = len(repo)
923
924
924 pctx = repo['.']
925 pctx = repo['.']
925 tmpwctx = pctx
926 tmpwctx = pctx
926 # The goal is to have a commit structure like so:
927 # The goal is to have a commit structure like so:
927 # ...-> pctx -> tmpwctx -> shelvectx
928 # ...-> pctx -> tmpwctx -> shelvectx
928 # where tmpwctx is an optional commit with the user's pending changes
929 # where tmpwctx is an optional commit with the user's pending changes
929 # and shelvectx is the unshelved changes. Then we merge it all down
930 # and shelvectx is the unshelved changes. Then we merge it all down
930 # to the original pctx.
931 # to the original pctx.
931
932
932 activebookmark = _backupactivebookmark(repo)
933 activebookmark = _backupactivebookmark(repo)
933 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
934 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
934 with ui.configoverride(overrides, 'unshelve'):
935 with ui.configoverride(overrides, 'unshelve'):
935 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
936 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
936 tmpwctx)
937 tmpwctx)
937 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
938 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
938 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
939 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
939 branchtorestore = ''
940 branchtorestore = ''
940 if shelvectx.branch() != shelvectx.p1().branch():
941 if shelvectx.branch() != shelvectx.p1().branch():
941 branchtorestore = shelvectx.branch()
942 branchtorestore = shelvectx.branch()
942
943
943 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
944 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
944 basename, pctx, tmpwctx,
945 basename, pctx, tmpwctx,
945 shelvectx, branchtorestore,
946 shelvectx, branchtorestore,
946 activebookmark)
947 activebookmark)
947 mergefiles(ui, repo, pctx, shelvectx)
948 mergefiles(ui, repo, pctx, shelvectx)
948 restorebranch(ui, repo, branchtorestore)
949 restorebranch(ui, repo, branchtorestore)
949 _forgetunknownfiles(repo, shelvectx, addedbefore)
950 _forgetunknownfiles(repo, shelvectx, addedbefore)
950
951
951 shelvedstate.clear(repo)
952 shelvedstate.clear(repo)
952 _finishunshelve(repo, oldtiprev, tr, activebookmark)
953 _finishunshelve(repo, oldtiprev, tr, activebookmark)
953 unshelvecleanup(ui, repo, basename, opts)
954 unshelvecleanup(ui, repo, basename, opts)
954 finally:
955 finally:
955 if tr:
956 if tr:
956 tr.release()
957 tr.release()
957 lockmod.release(lock)
958 lockmod.release(lock)
958
959
959 @command('shelve',
960 @command('shelve',
960 [('A', 'addremove', None,
961 [('A', 'addremove', None,
961 _('mark new/missing files as added/removed before shelving')),
962 _('mark new/missing files as added/removed before shelving')),
962 ('u', 'unknown', None,
963 ('u', 'unknown', None,
963 _('store unknown files in the shelve')),
964 _('store unknown files in the shelve')),
964 ('', 'cleanup', None,
965 ('', 'cleanup', None,
965 _('delete all shelved changes')),
966 _('delete all shelved changes')),
966 ('', 'date', '',
967 ('', 'date', '',
967 _('shelve with the specified commit date'), _('DATE')),
968 _('shelve with the specified commit date'), _('DATE')),
968 ('d', 'delete', None,
969 ('d', 'delete', None,
969 _('delete the named shelved change(s)')),
970 _('delete the named shelved change(s)')),
970 ('e', 'edit', False,
971 ('e', 'edit', False,
971 _('invoke editor on commit messages')),
972 _('invoke editor on commit messages')),
972 ('l', 'list', None,
973 ('l', 'list', None,
973 _('list current shelves')),
974 _('list current shelves')),
974 ('m', 'message', '',
975 ('m', 'message', '',
975 _('use text as shelve message'), _('TEXT')),
976 _('use text as shelve message'), _('TEXT')),
976 ('n', 'name', '',
977 ('n', 'name', '',
977 _('use the given name for the shelved commit'), _('NAME')),
978 _('use the given name for the shelved commit'), _('NAME')),
978 ('p', 'patch', None,
979 ('p', 'patch', None,
979 _('show patch')),
980 _('show patch')),
980 ('i', 'interactive', None,
981 ('i', 'interactive', None,
981 _('interactive mode, only works while creating a shelve')),
982 _('interactive mode, only works while creating a shelve')),
982 ('', 'stat', None,
983 ('', 'stat', None,
983 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
984 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
984 _('hg shelve [OPTION]... [FILE]...'))
985 _('hg shelve [OPTION]... [FILE]...'))
985 def shelvecmd(ui, repo, *pats, **opts):
986 def shelvecmd(ui, repo, *pats, **opts):
986 '''save and set aside changes from the working directory
987 '''save and set aside changes from the working directory
987
988
988 Shelving takes files that "hg status" reports as not clean, saves
989 Shelving takes files that "hg status" reports as not clean, saves
989 the modifications to a bundle (a shelved change), and reverts the
990 the modifications to a bundle (a shelved change), and reverts the
990 files so that their state in the working directory becomes clean.
991 files so that their state in the working directory becomes clean.
991
992
992 To restore these changes to the working directory, use "hg
993 To restore these changes to the working directory, use "hg
993 unshelve"; this will work even if you switch to a different
994 unshelve"; this will work even if you switch to a different
994 commit.
995 commit.
995
996
996 When no files are specified, "hg shelve" saves all not-clean
997 When no files are specified, "hg shelve" saves all not-clean
997 files. If specific files or directories are named, only changes to
998 files. If specific files or directories are named, only changes to
998 those files are shelved.
999 those files are shelved.
999
1000
1000 In a bare shelve (when no files are specified, without the interactive,
1001 In a bare shelve (when no files are specified, without the interactive,
1001 include and exclude options), shelving remembers whether the working
1002 include and exclude options), shelving remembers whether the working
1002 directory was on a newly created branch, in other words whether the
1003 directory was on a newly created branch, in other words whether the
1003 working directory was on a different branch than its first parent. In this
1004 working directory was on a different branch than its first parent. In this
1004 situation unshelving restores branch information to the working directory.
1005 situation unshelving restores branch information to the working directory.
1005
1006
1006 Each shelved change has a name that makes it easier to find later.
1007 Each shelved change has a name that makes it easier to find later.
1007 The name of a shelved change defaults to being based on the active
1008 The name of a shelved change defaults to being based on the active
1008 bookmark, or if there is no active bookmark, the current named
1009 bookmark, or if there is no active bookmark, the current named
1009 branch. To specify a different name, use ``--name``.
1010 branch. To specify a different name, use ``--name``.
1010
1011
1011 To see a list of existing shelved changes, use the ``--list``
1012 To see a list of existing shelved changes, use the ``--list``
1012 option. For each shelved change, this will print its name, age,
1013 option. For each shelved change, this will print its name, age,
1013 and description; use ``--patch`` or ``--stat`` for more details.
1014 and description; use ``--patch`` or ``--stat`` for more details.
1014
1015
1015 To delete specific shelved changes, use ``--delete``. To delete
1016 To delete specific shelved changes, use ``--delete``. To delete
1016 all shelved changes, use ``--cleanup``.
1017 all shelved changes, use ``--cleanup``.
1017 '''
1018 '''
1018 opts = pycompat.byteskwargs(opts)
1019 opts = pycompat.byteskwargs(opts)
1019 allowables = [
1020 allowables = [
1020 ('addremove', {'create'}), # 'create' is a pseudo action
1021 ('addremove', {'create'}), # 'create' is a pseudo action
1021 ('unknown', {'create'}),
1022 ('unknown', {'create'}),
1022 ('cleanup', {'cleanup'}),
1023 ('cleanup', {'cleanup'}),
1023 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1024 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1024 ('delete', {'delete'}),
1025 ('delete', {'delete'}),
1025 ('edit', {'create'}),
1026 ('edit', {'create'}),
1026 ('list', {'list'}),
1027 ('list', {'list'}),
1027 ('message', {'create'}),
1028 ('message', {'create'}),
1028 ('name', {'create'}),
1029 ('name', {'create'}),
1029 ('patch', {'patch', 'list'}),
1030 ('patch', {'patch', 'list'}),
1030 ('stat', {'stat', 'list'}),
1031 ('stat', {'stat', 'list'}),
1031 ]
1032 ]
1032 def checkopt(opt):
1033 def checkopt(opt):
1033 if opts.get(opt):
1034 if opts.get(opt):
1034 for i, allowable in allowables:
1035 for i, allowable in allowables:
1035 if opts[i] and opt not in allowable:
1036 if opts[i] and opt not in allowable:
1036 raise error.Abort(_("options '--%s' and '--%s' may not be "
1037 raise error.Abort(_("options '--%s' and '--%s' may not be "
1037 "used together") % (opt, i))
1038 "used together") % (opt, i))
1038 return True
1039 return True
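# checkopt(opt) requires every other action option the user passed to list
# 'opt' among its allowable actions; e.g. '--list --patch' is accepted
# ('patch' allows the 'list' action), while '--delete --list' aborts
# ('list' allows only the 'list' action).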
1039 if checkopt('cleanup'):
1040 if checkopt('cleanup'):
1040 if pats:
1041 if pats:
1041 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1042 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1042 return cleanupcmd(ui, repo)
1043 return cleanupcmd(ui, repo)
1043 elif checkopt('delete'):
1044 elif checkopt('delete'):
1044 return deletecmd(ui, repo, pats)
1045 return deletecmd(ui, repo, pats)
1045 elif checkopt('list'):
1046 elif checkopt('list'):
1046 return listcmd(ui, repo, pats, opts)
1047 return listcmd(ui, repo, pats, opts)
1047 elif checkopt('patch'):
1048 elif checkopt('patch'):
1048 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1049 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1049 elif checkopt('stat'):
1050 elif checkopt('stat'):
1050 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1051 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1051 else:
1052 else:
1052 return createcmd(ui, repo, pats, opts)
1053 return createcmd(ui, repo, pats, opts)
1053
1054
1054 def extsetup(ui):
1055 def extsetup(ui):
1055 cmdutil.unfinishedstates.append(
1056 cmdutil.unfinishedstates.append(
1056 [shelvedstate._filename, False, False,
1057 [shelvedstate._filename, False, False,
1057 _('unshelve already in progress'),
1058 _('unshelve already in progress'),
1058 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1059 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1059 cmdutil.afterresolvedstates.append(
1060 cmdutil.afterresolvedstates.append(
1060 [shelvedstate._filename, _('hg unshelve --continue')])
1061 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,593 +1,594 b''
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg
8 """command server extension for cHg
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'setenv' command
19 'setenv' command
20 replace os.environ completely
20 replace os.environ completely
21
21
22 'setumask' command
22 'setumask' command
23 set umask
23 set umask
24
24
25 'validate' command
25 'validate' command
26 reload the config and check if the server is up to date
26 reload the config and check if the server is up to date
27
27
28 Config
28 Config
29 ------
29 ------
30
30
31 ::
31 ::
32
32
33 [chgserver]
33 [chgserver]
34 # how long (in seconds) should an idle chg server exit
34 # how long (in seconds) an idle chg server waits before exiting
35 # how long (in seconds) an idle chg server waits before exiting
35 idletimeout = 3600
36
36
37 # whether to skip config or env change checks
37 # whether to skip config or env change checks
38 skiphash = False
38 skiphash = False
39 """
39 """
40
40
41 from __future__ import absolute_import
41 from __future__ import absolute_import
42
42
43 import hashlib
43 import hashlib
44 import inspect
44 import inspect
45 import os
45 import os
46 import re
46 import re
47 import socket
47 import socket
48 import stat
48 import struct
49 import struct
49 import time
50 import time
50
51
51 from .i18n import _
52 from .i18n import _
52
53
53 from . import (
54 from . import (
54 commandserver,
55 commandserver,
55 encoding,
56 encoding,
56 error,
57 error,
57 extensions,
58 extensions,
58 node,
59 node,
59 pycompat,
60 pycompat,
60 util,
61 util,
61 )
62 )
62
63
63 _log = commandserver.log
64 _log = commandserver.log
64
65
65 def _hashlist(items):
66 def _hashlist(items):
66 """return sha1 hexdigest for a list"""
67 """return sha1 hexdigest for a list"""
67 return node.hex(hashlib.sha1(str(items)).digest())
68 return node.hex(hashlib.sha1(str(items)).digest())
68
69
69 # sensitive config sections affecting confighash
70 # sensitive config sections affecting confighash
70 _configsections = [
71 _configsections = [
71 'alias', # affects global state commands.table
72 'alias', # affects global state commands.table
72 'eol', # uses setconfig('eol', ...)
73 'eol', # uses setconfig('eol', ...)
73 'extdiff', # uisetup will register new commands
74 'extdiff', # uisetup will register new commands
74 'extensions',
75 'extensions',
75 ]
76 ]
76
77
77 _configsectionitems = [
78 _configsectionitems = [
78 ('commands', 'show.aliasprefix'), # show.py reads it in extsetup
79 ('commands', 'show.aliasprefix'), # show.py reads it in extsetup
79 ]
80 ]
80
81
81 # sensitive environment variables affecting confighash
82 # sensitive environment variables affecting confighash
82 _envre = re.compile(r'''\A(?:
83 _envre = re.compile(r'''\A(?:
83 CHGHG
84 CHGHG
84 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
85 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
85 |HG(?:ENCODING|PLAIN).*
86 |HG(?:ENCODING|PLAIN).*
86 |LANG(?:UAGE)?
87 |LANG(?:UAGE)?
87 |LC_.*
88 |LC_.*
88 |LD_.*
89 |LD_.*
89 |PATH
90 |PATH
90 |PYTHON.*
91 |PYTHON.*
91 |TERM(?:INFO)?
92 |TERM(?:INFO)?
92 |TZ
93 |TZ
93 )\Z''', re.X)
94 )\Z''', re.X)
94
95
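A quick check of which environment variable names the pattern above treats as sensitive (the expected results follow directly from the regex)::

    for name in ('CHGHG', 'HGPLAIN', 'HGPLAINEXCEPT', 'LC_ALL', 'PATH'):
        assert _envre.match(name)            # included in confighash
    for name in ('HGUSER', 'HOME', 'EDITOR'):
        assert _envre.match(name) is None    # ignored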
95 def _confighash(ui):
96 def _confighash(ui):
96 """return a quick hash for detecting config/env changes
97 """return a quick hash for detecting config/env changes
97
98
98 confighash is the hash of sensitive config items and environment variables.
99 confighash is the hash of sensitive config items and environment variables.
99
100
100 for chgserver, it is designed so that once confighash changes, the server
101 for chgserver, it is designed so that once confighash changes, the server
101 is no longer qualified to serve its client and should redirect the client
102 is no longer qualified to serve its client and should redirect the client
102 to a new server. unlike mtimehash, a confighash change will not mark the
103 to a new server. unlike mtimehash, a confighash change will not mark the
103 server outdated and make it exit, since the user can have different
104 server outdated and make it exit, since the user can have different
104 configs at the same time.
105 configs at the same time.
105 """
106 """
106 sectionitems = []
107 sectionitems = []
107 for section in _configsections:
108 for section in _configsections:
108 sectionitems.append(ui.configitems(section))
109 sectionitems.append(ui.configitems(section))
109 for section, item in _configsectionitems:
110 for section, item in _configsectionitems:
110 sectionitems.append(ui.config(section, item))
111 sectionitems.append(ui.config(section, item))
111 sectionhash = _hashlist(sectionitems)
112 sectionhash = _hashlist(sectionitems)
112 # If $CHGHG is set, the change to $HG should not trigger a new chg server
113 # If $CHGHG is set, the change to $HG should not trigger a new chg server
113 if 'CHGHG' in encoding.environ:
114 if 'CHGHG' in encoding.environ:
114 ignored = {'HG'}
115 ignored = {'HG'}
115 else:
116 else:
116 ignored = set()
117 ignored = set()
117 envitems = [(k, v) for k, v in encoding.environ.iteritems()
118 envitems = [(k, v) for k, v in encoding.environ.iteritems()
118 if _envre.match(k) and k not in ignored]
119 if _envre.match(k) and k not in ignored]
119 envhash = _hashlist(sorted(envitems))
120 envhash = _hashlist(sorted(envitems))
120 return sectionhash[:6] + envhash[:6]
121 return sectionhash[:6] + envhash[:6]
121
122
122 def _getmtimepaths(ui):
123 def _getmtimepaths(ui):
123 """get a list of paths that should be checked to detect change
124 """get a list of paths that should be checked to detect change
124
125
125 The list will include:
126 The list will include:
126 - extensions (will not cover all files for complex extensions)
127 - extensions (will not cover all files for complex extensions)
127 - mercurial/__version__.py
128 - mercurial/__version__.py
128 - python binary
129 - python binary
129 """
130 """
130 modules = [m for n, m in extensions.extensions(ui)]
131 modules = [m for n, m in extensions.extensions(ui)]
131 try:
132 try:
132 from . import __version__
133 from . import __version__
133 modules.append(__version__)
134 modules.append(__version__)
134 except ImportError:
135 except ImportError:
135 pass
136 pass
136 files = [pycompat.sysexecutable]
137 files = [pycompat.sysexecutable]
137 for m in modules:
138 for m in modules:
138 try:
139 try:
139 files.append(inspect.getabsfile(m))
140 files.append(inspect.getabsfile(m))
140 except TypeError:
141 except TypeError:
141 pass
142 pass
142 return sorted(set(files))
143 return sorted(set(files))
143
144
144 def _mtimehash(paths):
145 def _mtimehash(paths):
145 """return a quick hash for detecting file changes
146 """return a quick hash for detecting file changes
146
147
147 mtimehash calls stat on the given paths and calculates a hash based on the
148 mtimehash calls stat on the given paths and calculates a hash based on the
148 size and mtime of each file. mtimehash does not read file content because
149 size and mtime of each file. mtimehash does not read file content because
149 reading is expensive. therefore it's not 100% reliable for detecting content
150 reading is expensive. therefore it's not 100% reliable for detecting content
150 changes. it's possible to return different hashes for the same file contents.
151 changes. it's possible to return different hashes for the same file contents.
151 it's also possible to return the same hash for different file contents in
152 it's also possible to return the same hash for different file contents in
152 some carefully crafted situations.
153 some carefully crafted situations.
153
154
154 for chgserver, it is designed so that once mtimehash changes, the server is
155 for chgserver, it is designed so that once mtimehash changes, the server is
155 considered outdated immediately and should no longer provide service.
156 considered outdated immediately and should no longer provide service.
156
157
157 mtimehash is not included in confighash because we only know the paths of
158 mtimehash is not included in confighash because we only know the paths of
158 extensions after importing them (there is imp.find_module but that faces
159 extensions after importing them (there is imp.find_module but that faces
159 race conditions). We need to calculate confighash without importing.
160 race conditions). We need to calculate confighash without importing.
160 """
161 """
161 def trystat(path):
162 def trystat(path):
162 try:
163 try:
163 st = os.stat(path)
164 st = os.stat(path)
164 return (st.st_mtime, st.st_size)
165 return (st[stat.ST_MTIME], st.st_size)
165 except OSError:
166 except OSError:
166 # could be ENOENT, EPERM etc. not fatal in any case
167 # could be ENOENT, EPERM etc. not fatal in any case
167 pass
168 pass
168 return _hashlist(map(trystat, paths))[:12]
169 return _hashlist(map(trystat, paths))[:12]
169
170
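Combined with ``trystat``, the whole fingerprint is a short hex prefix over a list of ``(mtime, size)`` pairs. A standalone sketch of the same scheme (the function name is hypothetical; ``hexdigest`` stands in for ``node.hex``)::

    import hashlib
    import os
    import stat

    def mtimefingerprint(paths):
        def trystat(path):
            try:
                st = os.stat(path)
                return (st[stat.ST_MTIME], st.st_size)
            except OSError:
                return None  # unreadable paths still contribute a slot
        entries = [trystat(p) for p in paths]
        return hashlib.sha1(repr(entries).encode('ascii')).hexdigest()[:12]

    print(mtimefingerprint([__file__]))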
170 class hashstate(object):
171 class hashstate(object):
171 """a structure storing confighash, mtimehash, paths used for mtimehash"""
172 """a structure storing confighash, mtimehash, paths used for mtimehash"""
172 def __init__(self, confighash, mtimehash, mtimepaths):
173 def __init__(self, confighash, mtimehash, mtimepaths):
173 self.confighash = confighash
174 self.confighash = confighash
174 self.mtimehash = mtimehash
175 self.mtimehash = mtimehash
175 self.mtimepaths = mtimepaths
176 self.mtimepaths = mtimepaths
176
177
177 @staticmethod
178 @staticmethod
178 def fromui(ui, mtimepaths=None):
179 def fromui(ui, mtimepaths=None):
179 if mtimepaths is None:
180 if mtimepaths is None:
180 mtimepaths = _getmtimepaths(ui)
181 mtimepaths = _getmtimepaths(ui)
181 confighash = _confighash(ui)
182 confighash = _confighash(ui)
182 mtimehash = _mtimehash(mtimepaths)
183 mtimehash = _mtimehash(mtimepaths)
183 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
184 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
184 return hashstate(confighash, mtimehash, mtimepaths)
185 return hashstate(confighash, mtimehash, mtimepaths)
185
186
186 def _newchgui(srcui, csystem, attachio):
187 def _newchgui(srcui, csystem, attachio):
187 class chgui(srcui.__class__):
188 class chgui(srcui.__class__):
188 def __init__(self, src=None):
189 def __init__(self, src=None):
189 super(chgui, self).__init__(src)
190 super(chgui, self).__init__(src)
190 if src:
191 if src:
191 self._csystem = getattr(src, '_csystem', csystem)
192 self._csystem = getattr(src, '_csystem', csystem)
192 else:
193 else:
193 self._csystem = csystem
194 self._csystem = csystem
194
195
195 def _runsystem(self, cmd, environ, cwd, out):
196 def _runsystem(self, cmd, environ, cwd, out):
196 # fall back to the original system method if the output needs to be
197 # fall back to the original system method if the output needs to be
197 # captured (to self._buffers), or the output stream is not stdout
198 # captured (to self._buffers), or the output stream is not stdout
198 # (e.g. stderr, cStringIO), because the chg client is not aware of
199 # (e.g. stderr, cStringIO), because the chg client is not aware of
199 # these situations and will behave differently (write to stdout).
200 # these situations and will behave differently (write to stdout).
200 if (out is not self.fout
201 if (out is not self.fout
201 or not util.safehasattr(self.fout, 'fileno')
202 or not util.safehasattr(self.fout, 'fileno')
202 or self.fout.fileno() != util.stdout.fileno()):
203 or self.fout.fileno() != util.stdout.fileno()):
203 return util.system(cmd, environ=environ, cwd=cwd, out=out)
204 return util.system(cmd, environ=environ, cwd=cwd, out=out)
204 self.flush()
205 self.flush()
205 return self._csystem(cmd, util.shellenviron(environ), cwd)
206 return self._csystem(cmd, util.shellenviron(environ), cwd)
206
207
207 def _runpager(self, cmd, env=None):
208 def _runpager(self, cmd, env=None):
208 self._csystem(cmd, util.shellenviron(env), type='pager',
209 self._csystem(cmd, util.shellenviron(env), type='pager',
209 cmdtable={'attachio': attachio})
210 cmdtable={'attachio': attachio})
210 return True
211 return True
211
212
212 return chgui(srcui)
213 return chgui(srcui)
213
214
214 def _loadnewui(srcui, args):
215 def _loadnewui(srcui, args):
215 from . import dispatch # avoid cycle
216 from . import dispatch # avoid cycle
216
217
217 newui = srcui.__class__.load()
218 newui = srcui.__class__.load()
218 for a in ['fin', 'fout', 'ferr', 'environ']:
219 for a in ['fin', 'fout', 'ferr', 'environ']:
219 setattr(newui, a, getattr(srcui, a))
220 setattr(newui, a, getattr(srcui, a))
220 if util.safehasattr(srcui, '_csystem'):
221 if util.safehasattr(srcui, '_csystem'):
221 newui._csystem = srcui._csystem
222 newui._csystem = srcui._csystem
222
223
223 # command line args
224 # command line args
224 options = dispatch._earlyparseopts(newui, args)
225 options = dispatch._earlyparseopts(newui, args)
225 dispatch._parseconfig(newui, options['config'])
226 dispatch._parseconfig(newui, options['config'])
226
227
227 # stolen from tortoisehg.util.copydynamicconfig()
228 # stolen from tortoisehg.util.copydynamicconfig()
228 for section, name, value in srcui.walkconfig():
229 for section, name, value in srcui.walkconfig():
229 source = srcui.configsource(section, name)
230 source = srcui.configsource(section, name)
230 if ':' in source or source == '--config' or source.startswith('$'):
231 if ':' in source or source == '--config' or source.startswith('$'):
231 # path:line or command line, or environ
232 # path:line or command line, or environ
232 continue
233 continue
233 newui.setconfig(section, name, value, source)
234 newui.setconfig(section, name, value, source)
234
235
235 # load wd and repo config, copied from dispatch.py
236 # load wd and repo config, copied from dispatch.py
236 cwd = options['cwd']
237 cwd = options['cwd']
237 cwd = cwd and os.path.realpath(cwd) or None
238 cwd = cwd and os.path.realpath(cwd) or None
238 rpath = options['repository']
239 rpath = options['repository']
239 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
240 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
240
241
241 return (newui, newlui)
242 return (newui, newlui)
242
243
243 class channeledsystem(object):
244 class channeledsystem(object):
244 """Propagate ui.system() request in the following format:
245 """Propagate ui.system() request in the following format:
245
246
246 payload length (unsigned int),
247 payload length (unsigned int),
247 type, '\0',
248 type, '\0',
248 cmd, '\0',
249 cmd, '\0',
249 cwd, '\0',
250 cwd, '\0',
250 envkey, '=', val, '\0',
251 envkey, '=', val, '\0',
251 ...
252 ...
252 envkey, '=', val
253 envkey, '=', val
253
254
254 if type == 'system', waits for:
255 if type == 'system', waits for:
255
256
256 exitcode length (unsigned int),
257 exitcode length (unsigned int),
257 exitcode (int)
258 exitcode (int)
258
259
259 if type == 'pager', repetitively waits for a command name ending with '\n'
260 if type == 'pager', repetitively waits for a command name ending with '\n'
260 and executes it defined by cmdtable, or exits the loop if the command name
261 and executes it defined by cmdtable, or exits the loop if the command name
261 is empty.
262 is empty.
262 """
263 """
263 def __init__(self, in_, out, channel):
264 def __init__(self, in_, out, channel):
264 self.in_ = in_
265 self.in_ = in_
265 self.out = out
266 self.out = out
266 self.channel = channel
267 self.channel = channel
267
268
268 def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
269 def __call__(self, cmd, environ, cwd=None, type='system', cmdtable=None):
269 args = [type, util.quotecommand(cmd), os.path.abspath(cwd or '.')]
270 args = [type, util.quotecommand(cmd), os.path.abspath(cwd or '.')]
270 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
271 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
271 data = '\0'.join(args)
272 data = '\0'.join(args)
272 self.out.write(struct.pack('>cI', self.channel, len(data)))
273 self.out.write(struct.pack('>cI', self.channel, len(data)))
273 self.out.write(data)
274 self.out.write(data)
274 self.out.flush()
275 self.out.flush()
275
276
276 if type == 'system':
277 if type == 'system':
277 length = self.in_.read(4)
278 length = self.in_.read(4)
278 length, = struct.unpack('>I', length)
279 length, = struct.unpack('>I', length)
279 if length != 4:
280 if length != 4:
280 raise error.Abort(_('invalid response'))
281 raise error.Abort(_('invalid response'))
281 rc, = struct.unpack('>i', self.in_.read(4))
282 rc, = struct.unpack('>i', self.in_.read(4))
282 return rc
283 return rc
283 elif type == 'pager':
284 elif type == 'pager':
284 while True:
285 while True:
285 cmd = self.in_.readline()[:-1]
286 cmd = self.in_.readline()[:-1]
286 if not cmd:
287 if not cmd:
287 break
288 break
288 if cmdtable and cmd in cmdtable:
289 if cmdtable and cmd in cmdtable:
289 _log('pager subcommand: %s' % cmd)
290 _log('pager subcommand: %s' % cmd)
290 cmdtable[cmd]()
291 cmdtable[cmd]()
291 else:
292 else:
292 raise error.Abort(_('unexpected command: %s') % cmd)
293 raise error.Abort(_('unexpected command: %s') % cmd)
293 else:
294 else:
294 raise error.ProgrammingError('invalid S channel type: %s' % type)
295 raise error.ProgrammingError('invalid S channel type: %s' % type)
295
296
296 _iochannels = [
297 _iochannels = [
297 # server.ch, ui.fp, mode
298 # server.ch, ui.fp, mode
298 ('cin', 'fin', pycompat.sysstr('rb')),
299 ('cin', 'fin', pycompat.sysstr('rb')),
299 ('cout', 'fout', pycompat.sysstr('wb')),
300 ('cout', 'fout', pycompat.sysstr('wb')),
300 ('cerr', 'ferr', pycompat.sysstr('wb')),
301 ('cerr', 'ferr', pycompat.sysstr('wb')),
301 ]
302 ]
302
303
303 class chgcmdserver(commandserver.server):
304 class chgcmdserver(commandserver.server):
304 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
305 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
305 super(chgcmdserver, self).__init__(
306 super(chgcmdserver, self).__init__(
306 _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
307 _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
307 repo, fin, fout)
308 repo, fin, fout)
308 self.clientsock = sock
309 self.clientsock = sock
309 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
310 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
310 self.hashstate = hashstate
311 self.hashstate = hashstate
311 self.baseaddress = baseaddress
312 self.baseaddress = baseaddress
312 if hashstate is not None:
313 if hashstate is not None:
313 self.capabilities = self.capabilities.copy()
314 self.capabilities = self.capabilities.copy()
314 self.capabilities['validate'] = chgcmdserver.validate
315 self.capabilities['validate'] = chgcmdserver.validate
315
316
316 def cleanup(self):
317 def cleanup(self):
317 super(chgcmdserver, self).cleanup()
318 super(chgcmdserver, self).cleanup()
318 # dispatch._runcatch() does not flush outputs if exception is not
319 # dispatch._runcatch() does not flush outputs if exception is not
319 # handled by dispatch._dispatch()
320 # handled by dispatch._dispatch()
320 self.ui.flush()
321 self.ui.flush()
321 self._restoreio()
322 self._restoreio()
322
323
323 def attachio(self):
324 def attachio(self):
324 """Attach to client's stdio passed via unix domain socket; all
325 """Attach to client's stdio passed via unix domain socket; all
325 channels except cresult will no longer be used
326 channels except cresult will no longer be used
326 """
327 """
327 # tell client to sendmsg() with 1-byte payload, which makes it
328 # tell client to sendmsg() with 1-byte payload, which makes it
328 # distinctive from "attachio\n" command consumed by client.read()
329 # distinctive from "attachio\n" command consumed by client.read()
329 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
330 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
330 clientfds = util.recvfds(self.clientsock.fileno())
331 clientfds = util.recvfds(self.clientsock.fileno())
331 _log('received fds: %r\n' % clientfds)
332 _log('received fds: %r\n' % clientfds)
332
333
333 ui = self.ui
334 ui = self.ui
334 ui.flush()
335 ui.flush()
335 first = self._saveio()
336 first = self._saveio()
336 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
337 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
337 assert fd > 0
338 assert fd > 0
338 fp = getattr(ui, fn)
339 fp = getattr(ui, fn)
339 os.dup2(fd, fp.fileno())
340 os.dup2(fd, fp.fileno())
340 os.close(fd)
341 os.close(fd)
341 if not first:
342 if not first:
342 continue
343 continue
343 # reset buffering mode when client is first attached. as we want
344 # reset buffering mode when client is first attached. as we want
344 # to see output immediately on pager, the mode stays unchanged
345 # to see output immediately on pager, the mode stays unchanged
345 # when client re-attached. ferr is unchanged because it should
346 # when client re-attached. ferr is unchanged because it should
346 # be unbuffered no matter if it is a tty or not.
347 # be unbuffered no matter if it is a tty or not.
347 if fn == 'ferr':
348 if fn == 'ferr':
348 newfp = fp
349 newfp = fp
349 else:
350 else:
350 # make it line buffered explicitly because the default is
351 # make it line buffered explicitly because the default is
351 # decided on first write(), where fout could be a pager.
352 # decided on first write(), where fout could be a pager.
352 if fp.isatty():
353 if fp.isatty():
353 bufsize = 1 # line buffered
354 bufsize = 1 # line buffered
354 else:
355 else:
355 bufsize = -1 # system default
356 bufsize = -1 # system default
356 newfp = os.fdopen(fp.fileno(), mode, bufsize)
357 newfp = os.fdopen(fp.fileno(), mode, bufsize)
357 setattr(ui, fn, newfp)
358 setattr(ui, fn, newfp)
358 setattr(self, cn, newfp)
359 setattr(self, cn, newfp)
359
360
360 self.cresult.write(struct.pack('>i', len(clientfds)))
361 self.cresult.write(struct.pack('>i', len(clientfds)))
361
362
362 def _saveio(self):
363 def _saveio(self):
363 if self._oldios:
364 if self._oldios:
364 return False
365 return False
365 ui = self.ui
366 ui = self.ui
366 for cn, fn, _mode in _iochannels:
367 for cn, fn, _mode in _iochannels:
367 ch = getattr(self, cn)
368 ch = getattr(self, cn)
368 fp = getattr(ui, fn)
369 fp = getattr(ui, fn)
369 fd = os.dup(fp.fileno())
370 fd = os.dup(fp.fileno())
370 self._oldios.append((ch, fp, fd))
371 self._oldios.append((ch, fp, fd))
371 return True
372 return True
372
373
373 def _restoreio(self):
374 def _restoreio(self):
374 ui = self.ui
375 ui = self.ui
375 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
376 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
376 newfp = getattr(ui, fn)
377 newfp = getattr(ui, fn)
377 # close newfp while it's associated with client; otherwise it
378 # close newfp while it's associated with client; otherwise it
378 # would be closed when newfp is deleted
379 # would be closed when newfp is deleted
379 if newfp is not fp:
380 if newfp is not fp:
380 newfp.close()
381 newfp.close()
381 # restore original fd: fp is open again
382 # restore original fd: fp is open again
382 os.dup2(fd, fp.fileno())
383 os.dup2(fd, fp.fileno())
383 os.close(fd)
384 os.close(fd)
384 setattr(self, cn, ch)
385 setattr(self, cn, ch)
385 setattr(ui, fn, fp)
386 setattr(ui, fn, fp)
386 del self._oldios[:]
387 del self._oldios[:]
387
388
388 def validate(self):
389 def validate(self):
389 """Reload the config and check if the server is up to date
390 """Reload the config and check if the server is up to date
390
391
391 Read a list of '\0' separated arguments.
392 Read a list of '\0' separated arguments.
392 Write a non-empty list of '\0' separated instruction strings or '\0'
393 Write a non-empty list of '\0' separated instruction strings or '\0'
393 if the list is empty.
394 if the list is empty.
394 An instruction string could be either:
395 An instruction string could be either:
395 - "unlink $path", the client should unlink the path to stop the
396 - "unlink $path", the client should unlink the path to stop the
396 outdated server.
397 outdated server.
397 - "redirect $path", the client should attempt to connect to $path
398 - "redirect $path", the client should attempt to connect to $path
398 first. If it does not work, start a new server. It implies
399 first. If it does not work, start a new server. It implies
399 "reconnect".
400 "reconnect".
400 - "exit $n", the client should exit directly with code n.
401 - "exit $n", the client should exit directly with code n.
401 This may happen if we cannot parse the config.
402 This may happen if we cannot parse the config.
402 - "reconnect", the client should close the connection and
403 - "reconnect", the client should close the connection and
403 reconnect.
404 reconnect.
404 If neither "reconnect" nor "redirect" is included in the instruction
405 If neither "reconnect" nor "redirect" is included in the instruction
405 list, the client can continue with this server after completing all
406 list, the client can continue with this server after completing all
406 the instructions.
407 the instructions.
407 """
408 """
408 from . import dispatch # avoid cycle
409 from . import dispatch # avoid cycle
409
410
410 args = self._readlist()
411 args = self._readlist()
411 try:
412 try:
412 self.ui, lui = _loadnewui(self.ui, args)
413 self.ui, lui = _loadnewui(self.ui, args)
413 except error.ParseError as inst:
414 except error.ParseError as inst:
414 dispatch._formatparse(self.ui.warn, inst)
415 dispatch._formatparse(self.ui.warn, inst)
415 self.ui.flush()
416 self.ui.flush()
416 self.cresult.write('exit 255')
417 self.cresult.write('exit 255')
417 return
418 return
418 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
419 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
419 insts = []
420 insts = []
420 if newhash.mtimehash != self.hashstate.mtimehash:
421 if newhash.mtimehash != self.hashstate.mtimehash:
421 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
422 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
422 insts.append('unlink %s' % addr)
423 insts.append('unlink %s' % addr)
423 # mtimehash is empty if one or more extensions fail to load.
424 # mtimehash is empty if one or more extensions fail to load.
424 # to be compatible with hg, still serve the client this time.
425 # to be compatible with hg, still serve the client this time.
425 if self.hashstate.mtimehash:
426 if self.hashstate.mtimehash:
426 insts.append('reconnect')
427 insts.append('reconnect')
427 if newhash.confighash != self.hashstate.confighash:
428 if newhash.confighash != self.hashstate.confighash:
428 addr = _hashaddress(self.baseaddress, newhash.confighash)
429 addr = _hashaddress(self.baseaddress, newhash.confighash)
429 insts.append('redirect %s' % addr)
430 insts.append('redirect %s' % addr)
430 _log('validate: %s\n' % insts)
431 _log('validate: %s\n' % insts)
431 self.cresult.write('\0'.join(insts) or '\0')
432 self.cresult.write('\0'.join(insts) or '\0')
432
433
433 def chdir(self):
434 def chdir(self):
434 """Change current directory
435 """Change current directory
435
436
436 Note that the behavior of --cwd option is bit different from this.
437 Note that the behavior of --cwd option is bit different from this.
437 It does not affect --config parameter.
438 It does not affect --config parameter.
438 """
439 """
439 path = self._readstr()
440 path = self._readstr()
440 if not path:
441 if not path:
441 return
442 return
442 _log('chdir to %r\n' % path)
443 _log('chdir to %r\n' % path)
443 os.chdir(path)
444 os.chdir(path)
444
445
445 def setumask(self):
446 def setumask(self):
446 """Change umask"""
447 """Change umask"""
447 mask = struct.unpack('>I', self._read(4))[0]
448 mask = struct.unpack('>I', self._read(4))[0]
448 _log('setumask %r\n' % mask)
449 _log('setumask %r\n' % mask)
449 os.umask(mask)
450 os.umask(mask)
450
451
451 def runcommand(self):
452 def runcommand(self):
452 return super(chgcmdserver, self).runcommand()
453 return super(chgcmdserver, self).runcommand()
453
454
454 def setenv(self):
455 def setenv(self):
455 """Clear and update os.environ
456 """Clear and update os.environ
456
457
457 Note that not all variables can make an effect on the running process.
458 Note that not all variables can make an effect on the running process.
458 """
459 """
459 l = self._readlist()
460 l = self._readlist()
460 try:
461 try:
461 newenv = dict(s.split('=', 1) for s in l)
462 newenv = dict(s.split('=', 1) for s in l)
462 except ValueError:
463 except ValueError:
463 raise ValueError('unexpected value in setenv request')
464 raise ValueError('unexpected value in setenv request')
464 _log('setenv: %r\n' % sorted(newenv.keys()))
465 _log('setenv: %r\n' % sorted(newenv.keys()))
465 encoding.environ.clear()
466 encoding.environ.clear()
466 encoding.environ.update(newenv)
467 encoding.environ.update(newenv)
467
468
468 capabilities = commandserver.server.capabilities.copy()
469 capabilities = commandserver.server.capabilities.copy()
469 capabilities.update({'attachio': attachio,
470 capabilities.update({'attachio': attachio,
470 'chdir': chdir,
471 'chdir': chdir,
471 'runcommand': runcommand,
472 'runcommand': runcommand,
472 'setenv': setenv,
473 'setenv': setenv,
473 'setumask': setumask})
474 'setumask': setumask})
474
475
475 if util.safehasattr(util, 'setprocname'):
476 if util.safehasattr(util, 'setprocname'):
476 def setprocname(self):
477 def setprocname(self):
477 """Change process title"""
478 """Change process title"""
478 name = self._readstr()
479 name = self._readstr()
479 _log('setprocname: %r\n' % name)
480 _log('setprocname: %r\n' % name)
480 util.setprocname(name)
481 util.setprocname(name)
481 capabilities['setprocname'] = setprocname
482 capabilities['setprocname'] = setprocname
482
483
483 def _tempaddress(address):
484 def _tempaddress(address):
484 return '%s.%d.tmp' % (address, os.getpid())
485 return '%s.%d.tmp' % (address, os.getpid())
485
486
486 def _hashaddress(address, hashstr):
487 def _hashaddress(address, hashstr):
487 # if the basename of address contains '.', use only the left part. this
488 # if the basename of address contains '.', use only the left part. this
488 # makes it possible for the client to pass 'server.tmp$PID' and follow by
489 # makes it possible for the client to pass 'server.tmp$PID' and follow by
489 # an atomic rename to avoid locking when spawning new servers.
490 # an atomic rename to avoid locking when spawning new servers.
490 dirname, basename = os.path.split(address)
491 dirname, basename = os.path.split(address)
491 basename = basename.split('.', 1)[0]
492 basename = basename.split('.', 1)[0]
492 return '%s-%s' % (os.path.join(dirname, basename), hashstr)
493 return '%s-%s' % (os.path.join(dirname, basename), hashstr)
493
494
494 class chgunixservicehandler(object):
495 class chgunixservicehandler(object):
495 """Set of operations for chg services"""
496 """Set of operations for chg services"""
496
497
497 pollinterval = 1 # [sec]
498 pollinterval = 1 # [sec]
498
499
499 def __init__(self, ui):
500 def __init__(self, ui):
500 self.ui = ui
501 self.ui = ui
501 self._idletimeout = ui.configint('chgserver', 'idletimeout')
502 self._idletimeout = ui.configint('chgserver', 'idletimeout')
502 self._lastactive = time.time()
503 self._lastactive = time.time()
503
504
504 def bindsocket(self, sock, address):
505 def bindsocket(self, sock, address):
505 self._inithashstate(address)
506 self._inithashstate(address)
506 self._checkextensions()
507 self._checkextensions()
507 self._bind(sock)
508 self._bind(sock)
508 self._createsymlink()
509 self._createsymlink()
509 # no "listening at" message should be printed to simulate hg behavior
510 # no "listening at" message should be printed to simulate hg behavior
510
511
511 def _inithashstate(self, address):
512 def _inithashstate(self, address):
512 self._baseaddress = address
513 self._baseaddress = address
513 if self.ui.configbool('chgserver', 'skiphash'):
514 if self.ui.configbool('chgserver', 'skiphash'):
514 self._hashstate = None
515 self._hashstate = None
515 self._realaddress = address
516 self._realaddress = address
516 return
517 return
517 self._hashstate = hashstate.fromui(self.ui)
518 self._hashstate = hashstate.fromui(self.ui)
518 self._realaddress = _hashaddress(address, self._hashstate.confighash)
519 self._realaddress = _hashaddress(address, self._hashstate.confighash)
519
520
520 def _checkextensions(self):
521 def _checkextensions(self):
521 if not self._hashstate:
522 if not self._hashstate:
522 return
523 return
523 if extensions.notloaded():
524 if extensions.notloaded():
524 # one or more extensions failed to load. mtimehash becomes
525 # one or more extensions failed to load. mtimehash becomes
525 # meaningless because we do not know the paths of those extensions.
526 # meaningless because we do not know the paths of those extensions.
526 # set mtimehash to an illegal hash value to invalidate the server.
527 # set mtimehash to an illegal hash value to invalidate the server.
527 self._hashstate.mtimehash = ''
528 self._hashstate.mtimehash = ''
528
529
529 def _bind(self, sock):
530 def _bind(self, sock):
530 # use a unique temp address so we can stat the file and do ownership
531 # use a unique temp address so we can stat the file and do ownership
531 # check later
532 # check later
532 tempaddress = _tempaddress(self._realaddress)
533 tempaddress = _tempaddress(self._realaddress)
533 util.bindunixsocket(sock, tempaddress)
534 util.bindunixsocket(sock, tempaddress)
534 self._socketstat = os.stat(tempaddress)
535 self._socketstat = os.stat(tempaddress)
535 sock.listen(socket.SOMAXCONN)
536 sock.listen(socket.SOMAXCONN)
536 # rename will replace the old socket file if exists atomically. the
537 # rename will replace the old socket file if exists atomically. the
537 # old server will detect ownership change and exit.
538 # old server will detect ownership change and exit.
538 util.rename(tempaddress, self._realaddress)
539 util.rename(tempaddress, self._realaddress)
539
540
540 def _createsymlink(self):
541 def _createsymlink(self):
541 if self._baseaddress == self._realaddress:
542 if self._baseaddress == self._realaddress:
542 return
543 return
543 tempaddress = _tempaddress(self._baseaddress)
544 tempaddress = _tempaddress(self._baseaddress)
544 os.symlink(os.path.basename(self._realaddress), tempaddress)
545 os.symlink(os.path.basename(self._realaddress), tempaddress)
545 util.rename(tempaddress, self._baseaddress)
546 util.rename(tempaddress, self._baseaddress)
546
547
547 def _issocketowner(self):
548 def _issocketowner(self):
548 try:
549 try:
549 stat = os.stat(self._realaddress)
550 st = os.stat(self._realaddress)
550 return (stat.st_ino == self._socketstat.st_ino and
551 return (st.st_ino == self._socketstat.st_ino and
551 stat.st_mtime == self._socketstat.st_mtime)
552 st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME])
552 except OSError:
553 except OSError:
553 return False
554 return False
554
555
555 def unlinksocket(self, address):
556 def unlinksocket(self, address):
556 if not self._issocketowner():
557 if not self._issocketowner():
557 return
558 return
558 # it is possible to have a race condition here that we may
559 # it is possible to have a race condition here that we may
559 # remove another server's socket file. but that's okay
560 # remove another server's socket file. but that's okay
560 # since that server will detect and exit automatically and
561 # since that server will detect and exit automatically and
561 # the client will start a new server on demand.
562 # the client will start a new server on demand.
562 util.tryunlink(self._realaddress)
563 util.tryunlink(self._realaddress)
563
564
564 def shouldexit(self):
565 def shouldexit(self):
565 if not self._issocketowner():
566 if not self._issocketowner():
566 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
567 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
567 return True
568 return True
568 if time.time() - self._lastactive > self._idletimeout:
569 if time.time() - self._lastactive > self._idletimeout:
569 self.ui.debug('being idle too long. exiting.\n')
570 self.ui.debug('being idle too long. exiting.\n')
570 return True
571 return True
571 return False
572 return False
572
573
573 def newconnection(self):
574 def newconnection(self):
574 self._lastactive = time.time()
575 self._lastactive = time.time()
575
576
576 def createcmdserver(self, repo, conn, fin, fout):
577 def createcmdserver(self, repo, conn, fin, fout):
577 return chgcmdserver(self.ui, repo, fin, fout, conn,
578 return chgcmdserver(self.ui, repo, fin, fout, conn,
578 self._hashstate, self._baseaddress)
579 self._hashstate, self._baseaddress)
579
580
580 def chgunixservice(ui, repo, opts):
581 def chgunixservice(ui, repo, opts):
581 # CHGINTERNALMARK is set by chg client. It is an indication of things are
582 # CHGINTERNALMARK is set by chg client. It is an indication of things are
582 # started by chg so other code can do things accordingly, like disabling
583 # started by chg so other code can do things accordingly, like disabling
583 # demandimport or detecting chg client started by chg client. When executed
584 # demandimport or detecting chg client started by chg client. When executed
584 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
585 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
585 # environ cleaner.
586 # environ cleaner.
586 if 'CHGINTERNALMARK' in encoding.environ:
587 if 'CHGINTERNALMARK' in encoding.environ:
587 del encoding.environ['CHGINTERNALMARK']
588 del encoding.environ['CHGINTERNALMARK']
588
589
589 if repo:
590 if repo:
590 # one chgserver can serve multiple repos. drop repo information
591 # one chgserver can serve multiple repos. drop repo information
591 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
592 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
592 h = chgunixservicehandler(ui)
593 h = chgunixservicehandler(ui)
593 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
594 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
@@ -1,2749 +1,2749 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .thirdparty import (
29 from .thirdparty import (
30 attr,
30 attr,
31 )
31 )
32 from . import (
32 from . import (
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 mdiff,
37 mdiff,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 obsutil,
39 obsutil,
40 patch,
40 patch,
41 pathutil,
41 pathutil,
42 phases,
42 phases,
43 pycompat,
43 pycompat,
44 repoview,
44 repoview,
45 revlog,
45 revlog,
46 scmutil,
46 scmutil,
47 sparse,
47 sparse,
48 subrepo,
48 subrepo,
49 subrepoutil,
49 subrepoutil,
50 util,
50 util,
51 )
51 )
52 from .utils import dateutil
52 from .utils import dateutil
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56 nonascii = re.compile(br'[^\x21-\x7f]').search
56 nonascii = re.compile(br'[^\x21-\x7f]').search
57
57
58 class basectx(object):
58 class basectx(object):
59 """A basectx object represents the common logic for its children:
59 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
60 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
61 workingctx: a context that represents the working directory and can
62 be committed,
62 be committed,
63 memctx: a context that represents changes in-memory and can also
63 memctx: a context that represents changes in-memory and can also
64 be committed."""
64 be committed."""
65 def __new__(cls, repo, changeid='', *args, **kwargs):
65 def __new__(cls, repo, changeid='', *args, **kwargs):
66 if isinstance(changeid, basectx):
66 if isinstance(changeid, basectx):
67 return changeid
67 return changeid
68
68
69 o = super(basectx, cls).__new__(cls)
69 o = super(basectx, cls).__new__(cls)
70
70
71 o._repo = repo
71 o._repo = repo
72 o._rev = nullrev
72 o._rev = nullrev
73 o._node = nullid
73 o._node = nullid
74
74
75 return o
75 return o
76
76
77 def __bytes__(self):
77 def __bytes__(self):
78 return short(self.node())
78 return short(self.node())
79
79
80 __str__ = encoding.strmethod(__bytes__)
80 __str__ = encoding.strmethod(__bytes__)
81
81
82 def __repr__(self):
82 def __repr__(self):
83 return r"<%s %s>" % (type(self).__name__, str(self))
83 return r"<%s %s>" % (type(self).__name__, str(self))
84
84
85 def __eq__(self, other):
85 def __eq__(self, other):
86 try:
86 try:
87 return type(self) == type(other) and self._rev == other._rev
87 return type(self) == type(other) and self._rev == other._rev
88 except AttributeError:
88 except AttributeError:
89 return False
89 return False
90
90
91 def __ne__(self, other):
91 def __ne__(self, other):
92 return not (self == other)
92 return not (self == other)
93
93
94 def __contains__(self, key):
94 def __contains__(self, key):
95 return key in self._manifest
95 return key in self._manifest
96
96
97 def __getitem__(self, key):
97 def __getitem__(self, key):
98 return self.filectx(key)
98 return self.filectx(key)
99
99
100 def __iter__(self):
100 def __iter__(self):
101 return iter(self._manifest)
101 return iter(self._manifest)
102
102
103 def _buildstatusmanifest(self, status):
103 def _buildstatusmanifest(self, status):
104 """Builds a manifest that includes the given status results, if this is
104 """Builds a manifest that includes the given status results, if this is
105 a working copy context. For non-working copy contexts, it just returns
105 a working copy context. For non-working copy contexts, it just returns
106 the normal manifest."""
106 the normal manifest."""
107 return self.manifest()
107 return self.manifest()
108
108
109 def _matchstatus(self, other, match):
109 def _matchstatus(self, other, match):
110 """This internal method provides a way for child objects to override the
110 """This internal method provides a way for child objects to override the
111 match operator.
111 match operator.
112 """
112 """
113 return match
113 return match
114
114
115 def _buildstatus(self, other, s, match, listignored, listclean,
115 def _buildstatus(self, other, s, match, listignored, listclean,
116 listunknown):
116 listunknown):
117 """build a status with respect to another context"""
117 """build a status with respect to another context"""
118 # Load earliest manifest first for caching reasons. More specifically,
118 # Load earliest manifest first for caching reasons. More specifically,
119 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 # if you have revisions 1000 and 1001, 1001 is probably stored as a
120 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
121 # 1000 and cache it so that when you read 1001, we just need to apply a
121 # 1000 and cache it so that when you read 1001, we just need to apply a
122 # delta to what's in the cache. So that's one full reconstruction + one
122 # delta to what's in the cache. So that's one full reconstruction + one
123 # delta application.
123 # delta application.
124 mf2 = None
124 mf2 = None
125 if self.rev() is not None and self.rev() < other.rev():
125 if self.rev() is not None and self.rev() < other.rev():
126 mf2 = self._buildstatusmanifest(s)
126 mf2 = self._buildstatusmanifest(s)
127 mf1 = other._buildstatusmanifest(s)
127 mf1 = other._buildstatusmanifest(s)
128 if mf2 is None:
128 if mf2 is None:
129 mf2 = self._buildstatusmanifest(s)
129 mf2 = self._buildstatusmanifest(s)
130
130
131 modified, added = [], []
131 modified, added = [], []
132 removed = []
132 removed = []
133 clean = []
133 clean = []
134 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
135 deletedset = set(deleted)
135 deletedset = set(deleted)
136 d = mf1.diff(mf2, match=match, clean=listclean)
136 d = mf1.diff(mf2, match=match, clean=listclean)
137 for fn, value in d.iteritems():
137 for fn, value in d.iteritems():
138 if fn in deletedset:
138 if fn in deletedset:
139 continue
139 continue
140 if value is None:
140 if value is None:
141 clean.append(fn)
141 clean.append(fn)
142 continue
142 continue
143 (node1, flag1), (node2, flag2) = value
143 (node1, flag1), (node2, flag2) = value
144 if node1 is None:
144 if node1 is None:
145 added.append(fn)
145 added.append(fn)
146 elif node2 is None:
146 elif node2 is None:
147 removed.append(fn)
147 removed.append(fn)
148 elif flag1 != flag2:
148 elif flag1 != flag2:
149 modified.append(fn)
149 modified.append(fn)
150 elif node2 not in wdirnodes:
150 elif node2 not in wdirnodes:
151 # When comparing files between two commits, we save time by
151 # When comparing files between two commits, we save time by
152 # not comparing the file contents when the nodeids differ.
152 # not comparing the file contents when the nodeids differ.
153 # Note that this means we incorrectly report a reverted change
153 # Note that this means we incorrectly report a reverted change
154 # to a file as a modification.
154 # to a file as a modification.
155 modified.append(fn)
155 modified.append(fn)
156 elif self[fn].cmp(other[fn]):
156 elif self[fn].cmp(other[fn]):
157 modified.append(fn)
157 modified.append(fn)
158 else:
158 else:
159 clean.append(fn)
159 clean.append(fn)
160
160
161 if removed:
161 if removed:
162 # need to filter files if they are already reported as removed
162 # need to filter files if they are already reported as removed
163 unknown = [fn for fn in unknown if fn not in mf1 and
163 unknown = [fn for fn in unknown if fn not in mf1 and
164 (not match or match(fn))]
164 (not match or match(fn))]
165 ignored = [fn for fn in ignored if fn not in mf1 and
165 ignored = [fn for fn in ignored if fn not in mf1 and
166 (not match or match(fn))]
166 (not match or match(fn))]
167 # if they're deleted, don't report them as removed
167 # if they're deleted, don't report them as removed
168 removed = [fn for fn in removed if fn not in deletedset]
168 removed = [fn for fn in removed if fn not in deletedset]
169
169
170 return scmutil.status(modified, added, removed, deleted, unknown,
170 return scmutil.status(modified, added, removed, deleted, unknown,
171 ignored, clean)
171 ignored, clean)
172
172
173 @propertycache
173 @propertycache
174 def substate(self):
174 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
175 return subrepoutil.state(self, self._repo.ui)
176
176
177 def subrev(self, subpath):
177 def subrev(self, subpath):
178 return self.substate[subpath][1]
178 return self.substate[subpath][1]
179
179
180 def rev(self):
180 def rev(self):
181 return self._rev
181 return self._rev
182 def node(self):
182 def node(self):
183 return self._node
183 return self._node
184 def hex(self):
184 def hex(self):
185 return hex(self.node())
185 return hex(self.node())
186 def manifest(self):
186 def manifest(self):
187 return self._manifest
187 return self._manifest
188 def manifestctx(self):
188 def manifestctx(self):
189 return self._manifestctx
189 return self._manifestctx
190 def repo(self):
190 def repo(self):
191 return self._repo
191 return self._repo
192 def phasestr(self):
192 def phasestr(self):
193 return phases.phasenames[self.phase()]
193 return phases.phasenames[self.phase()]
194 def mutable(self):
194 def mutable(self):
195 return self.phase() > phases.public
195 return self.phase() > phases.public
196
196
197 def getfileset(self, expr):
197 def getfileset(self, expr):
198 return fileset.getfileset(self, expr)
198 return fileset.getfileset(self, expr)
199
199
200 def obsolete(self):
200 def obsolete(self):
201 """True if the changeset is obsolete"""
201 """True if the changeset is obsolete"""
202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
203
203
204 def extinct(self):
204 def extinct(self):
205 """True if the changeset is extinct"""
205 """True if the changeset is extinct"""
206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
207
207
208 def orphan(self):
208 def orphan(self):
209 """True if the changeset is not obsolete but it's ancestor are"""
209 """True if the changeset is not obsolete but it's ancestor are"""
210 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
210 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
211
211
212 def phasedivergent(self):
212 def phasedivergent(self):
213 """True if the changeset try to be a successor of a public changeset
213 """True if the changeset try to be a successor of a public changeset
214
214
215 Only non-public and non-obsolete changesets may be bumped.
215 Only non-public and non-obsolete changesets may be bumped.
216 """
216 """
217 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
217 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
218
218
219 def contentdivergent(self):
219 def contentdivergent(self):
220 """Is a successors of a changeset with multiple possible successors set
220 """Is a successors of a changeset with multiple possible successors set
221
221
222 Only non-public and non-obsolete changesets may be divergent.
222 Only non-public and non-obsolete changesets may be divergent.
223 """
223 """
224 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
224 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
225
225
226 def isunstable(self):
226 def isunstable(self):
227 """True if the changeset is either unstable, bumped or divergent"""
227 """True if the changeset is either unstable, bumped or divergent"""
228 return self.orphan() or self.phasedivergent() or self.contentdivergent()
228 return self.orphan() or self.phasedivergent() or self.contentdivergent()
229
229
230 def instabilities(self):
230 def instabilities(self):
231 """return the list of instabilities affecting this changeset.
231 """return the list of instabilities affecting this changeset.
232
232
233 Instabilities are returned as strings. possible values are:
233 Instabilities are returned as strings. possible values are:
234 - orphan,
234 - orphan,
235 - phase-divergent,
235 - phase-divergent,
236 - content-divergent.
236 - content-divergent.
237 """
237 """
238 instabilities = []
238 instabilities = []
239 if self.orphan():
239 if self.orphan():
240 instabilities.append('orphan')
240 instabilities.append('orphan')
241 if self.phasedivergent():
241 if self.phasedivergent():
242 instabilities.append('phase-divergent')
242 instabilities.append('phase-divergent')
243 if self.contentdivergent():
243 if self.contentdivergent():
244 instabilities.append('content-divergent')
244 instabilities.append('content-divergent')
245 return instabilities
245 return instabilities
246
246
247 def parents(self):
247 def parents(self):
248 """return contexts for each parent changeset"""
248 """return contexts for each parent changeset"""
249 return self._parents
249 return self._parents
250
250
251 def p1(self):
251 def p1(self):
252 return self._parents[0]
252 return self._parents[0]
253
253
254 def p2(self):
254 def p2(self):
255 parents = self._parents
255 parents = self._parents
256 if len(parents) == 2:
256 if len(parents) == 2:
257 return parents[1]
257 return parents[1]
258 return changectx(self._repo, nullrev)
258 return changectx(self._repo, nullrev)
259
259
260 def _fileinfo(self, path):
260 def _fileinfo(self, path):
261 if r'_manifest' in self.__dict__:
261 if r'_manifest' in self.__dict__:
262 try:
262 try:
263 return self._manifest[path], self._manifest.flags(path)
263 return self._manifest[path], self._manifest.flags(path)
264 except KeyError:
264 except KeyError:
265 raise error.ManifestLookupError(self._node, path,
265 raise error.ManifestLookupError(self._node, path,
266 _('not found in manifest'))
266 _('not found in manifest'))
267 if r'_manifestdelta' in self.__dict__ or path in self.files():
267 if r'_manifestdelta' in self.__dict__ or path in self.files():
268 if path in self._manifestdelta:
268 if path in self._manifestdelta:
269 return (self._manifestdelta[path],
269 return (self._manifestdelta[path],
270 self._manifestdelta.flags(path))
270 self._manifestdelta.flags(path))
271 mfl = self._repo.manifestlog
271 mfl = self._repo.manifestlog
272 try:
272 try:
273 node, flag = mfl[self._changeset.manifest].find(path)
273 node, flag = mfl[self._changeset.manifest].find(path)
274 except KeyError:
274 except KeyError:
275 raise error.ManifestLookupError(self._node, path,
275 raise error.ManifestLookupError(self._node, path,
276 _('not found in manifest'))
276 _('not found in manifest'))
277
277
278 return node, flag
278 return node, flag
279
279
280 def filenode(self, path):
280 def filenode(self, path):
281 return self._fileinfo(path)[0]
281 return self._fileinfo(path)[0]
282
282
283 def flags(self, path):
283 def flags(self, path):
284 try:
284 try:
285 return self._fileinfo(path)[1]
285 return self._fileinfo(path)[1]
286 except error.LookupError:
286 except error.LookupError:
287 return ''
287 return ''
288
288
289 def sub(self, path, allowcreate=True):
289 def sub(self, path, allowcreate=True):
290 '''return a subrepo for the stored revision of path, never wdir()'''
290 '''return a subrepo for the stored revision of path, never wdir()'''
291 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 return subrepo.subrepo(self, path, allowcreate=allowcreate)
292
292
293 def nullsub(self, path, pctx):
293 def nullsub(self, path, pctx):
294 return subrepo.nullsubrepo(self, path, pctx)
294 return subrepo.nullsubrepo(self, path, pctx)
295
295
296 def workingsub(self, path):
296 def workingsub(self, path):
297 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 '''return a subrepo for the stored revision, or wdir if this is a wdir
298 context.
298 context.
299 '''
299 '''
300 return subrepo.subrepo(self, path, allowwdir=True)
300 return subrepo.subrepo(self, path, allowwdir=True)
301
301
302 def match(self, pats=None, include=None, exclude=None, default='glob',
302 def match(self, pats=None, include=None, exclude=None, default='glob',
303 listsubrepos=False, badfn=None):
303 listsubrepos=False, badfn=None):
304 r = self._repo
304 r = self._repo
305 return matchmod.match(r.root, r.getcwd(), pats,
305 return matchmod.match(r.root, r.getcwd(), pats,
306 include, exclude, default,
306 include, exclude, default,
307 auditor=r.nofsauditor, ctx=self,
307 auditor=r.nofsauditor, ctx=self,
308 listsubrepos=listsubrepos, badfn=badfn)
308 listsubrepos=listsubrepos, badfn=badfn)
309
309
310 def diff(self, ctx2=None, match=None, **opts):
310 def diff(self, ctx2=None, match=None, **opts):
311 """Returns a diff generator for the given contexts and matcher"""
311 """Returns a diff generator for the given contexts and matcher"""
312 if ctx2 is None:
312 if ctx2 is None:
313 ctx2 = self.p1()
313 ctx2 = self.p1()
314 if ctx2 is not None:
314 if ctx2 is not None:
315 ctx2 = self._repo[ctx2]
315 ctx2 = self._repo[ctx2]
316 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
316 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
317 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
317 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
318
318
319 def dirs(self):
319 def dirs(self):
320 return self._manifest.dirs()
320 return self._manifest.dirs()
321
321
322 def hasdir(self, dir):
322 def hasdir(self, dir):
323 return self._manifest.hasdir(dir)
323 return self._manifest.hasdir(dir)
324
324
325 def status(self, other=None, match=None, listignored=False,
325 def status(self, other=None, match=None, listignored=False,
326 listclean=False, listunknown=False, listsubrepos=False):
326 listclean=False, listunknown=False, listsubrepos=False):
327 """return status of files between two nodes or node and working
327 """return status of files between two nodes or node and working
328 directory.
328 directory.
329
329
330 If other is None, compare this node with working directory.
330 If other is None, compare this node with working directory.
331
331
332 returns (modified, added, removed, deleted, unknown, ignored, clean)
332 returns (modified, added, removed, deleted, unknown, ignored, clean)
333 """
333 """
334
334
335 ctx1 = self
335 ctx1 = self
336 ctx2 = self._repo[other]
336 ctx2 = self._repo[other]
337
337
338 # This next code block is, admittedly, fragile logic that tests for
338 # This next code block is, admittedly, fragile logic that tests for
339 # reversing the contexts and wouldn't need to exist if it weren't for
339 # reversing the contexts and wouldn't need to exist if it weren't for
340 # the fast (and common) code path of comparing the working directory
340 # the fast (and common) code path of comparing the working directory
341 # with its first parent.
341 # with its first parent.
342 #
342 #
343 # What we're aiming for here is the ability to call:
343 # What we're aiming for here is the ability to call:
344 #
344 #
345 # workingctx.status(parentctx)
345 # workingctx.status(parentctx)
346 #
346 #
347 # If we always built the manifest for each context and compared those,
347 # If we always built the manifest for each context and compared those,
348 # then we'd be done. But the special case of the above call means we
348 # then we'd be done. But the special case of the above call means we
349 # just copy the manifest of the parent.
349 # just copy the manifest of the parent.
350 reversed = False
350 reversed = False
351 if (not isinstance(ctx1, changectx)
351 if (not isinstance(ctx1, changectx)
352 and isinstance(ctx2, changectx)):
352 and isinstance(ctx2, changectx)):
353 reversed = True
353 reversed = True
354 ctx1, ctx2 = ctx2, ctx1
354 ctx1, ctx2 = ctx2, ctx1
355
355
356 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
356 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
357 match = ctx2._matchstatus(ctx1, match)
357 match = ctx2._matchstatus(ctx1, match)
358 r = scmutil.status([], [], [], [], [], [], [])
358 r = scmutil.status([], [], [], [], [], [], [])
359 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
359 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
360 listunknown)
360 listunknown)
361
361
362 if reversed:
362 if reversed:
363 # Reverse added and removed. Clear deleted, unknown and ignored as
363 # Reverse added and removed. Clear deleted, unknown and ignored as
364 # these make no sense to reverse.
364 # these make no sense to reverse.
365 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
365 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
366 r.clean)
366 r.clean)
367
367
368 if listsubrepos:
368 if listsubrepos:
369 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
369 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
370 try:
370 try:
371 rev2 = ctx2.subrev(subpath)
371 rev2 = ctx2.subrev(subpath)
372 except KeyError:
372 except KeyError:
373 # A subrepo that existed in node1 was deleted between
373 # A subrepo that existed in node1 was deleted between
374 # node1 and node2 (inclusive). Thus, ctx2's substate
374 # node1 and node2 (inclusive). Thus, ctx2's substate
375 # won't contain that subpath. The best we can do ignore it.
375 # won't contain that subpath. The best we can do ignore it.
376 rev2 = None
376 rev2 = None
377 submatch = matchmod.subdirmatcher(subpath, match)
377 submatch = matchmod.subdirmatcher(subpath, match)
378 s = sub.status(rev2, match=submatch, ignored=listignored,
378 s = sub.status(rev2, match=submatch, ignored=listignored,
379 clean=listclean, unknown=listunknown,
379 clean=listclean, unknown=listunknown,
380 listsubrepos=True)
380 listsubrepos=True)
381 for rfiles, sfiles in zip(r, s):
381 for rfiles, sfiles in zip(r, s):
382 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
382 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
383
383
384 for l in r:
384 for l in r:
385 l.sort()
385 l.sort()
386
386
387 return r
387 return r
388
388
389 def _filterederror(repo, changeid):
389 def _filterederror(repo, changeid):
390 """build an exception to be raised about a filtered changeid
390 """build an exception to be raised about a filtered changeid
391
391
392 This is extracted in a function to help extensions (eg: evolve) to
392 This is extracted in a function to help extensions (eg: evolve) to
393 experiment with various message variants."""
393 experiment with various message variants."""
394 if repo.filtername.startswith('visible'):
394 if repo.filtername.startswith('visible'):
395
395
396 # Check if the changeset is obsolete
396 # Check if the changeset is obsolete
397 unfilteredrepo = repo.unfiltered()
397 unfilteredrepo = repo.unfiltered()
398 ctx = unfilteredrepo[changeid]
398 ctx = unfilteredrepo[changeid]
399
399
400 # If the changeset is obsolete, enrich the message with the reason
400 # If the changeset is obsolete, enrich the message with the reason
401 # that made this changeset not visible
401 # that made this changeset not visible
402 if ctx.obsolete():
402 if ctx.obsolete():
403 msg = obsutil._getfilteredreason(repo, changeid, ctx)
403 msg = obsutil._getfilteredreason(repo, changeid, ctx)
404 else:
404 else:
405 msg = _("hidden revision '%s'") % changeid
405 msg = _("hidden revision '%s'") % changeid
406
406
407 hint = _('use --hidden to access hidden revisions')
407 hint = _('use --hidden to access hidden revisions')
408
408
409 return error.FilteredRepoLookupError(msg, hint=hint)
409 return error.FilteredRepoLookupError(msg, hint=hint)
410 msg = _("filtered revision '%s' (not in '%s' subset)")
410 msg = _("filtered revision '%s' (not in '%s' subset)")
411 msg %= (changeid, repo.filtername)
411 msg %= (changeid, repo.filtername)
412 return error.FilteredRepoLookupError(msg)
412 return error.FilteredRepoLookupError(msg)
413
413
414 class changectx(basectx):
414 class changectx(basectx):
415 """A changecontext object makes access to data related to a particular
415 """A changecontext object makes access to data related to a particular
416 changeset convenient. It represents a read-only context already present in
416 changeset convenient. It represents a read-only context already present in
417 the repo."""
417 the repo."""
418 def __init__(self, repo, changeid=''):
418 def __init__(self, repo, changeid=''):
419 """changeid is a revision number, node, or tag"""
419 """changeid is a revision number, node, or tag"""
420
420
421 # since basectx.__new__ already took care of copying the object, we
421 # since basectx.__new__ already took care of copying the object, we
422 # don't need to do anything in __init__, so we just exit here
422 # don't need to do anything in __init__, so we just exit here
423 if isinstance(changeid, basectx):
423 if isinstance(changeid, basectx):
424 return
424 return
425
425
426 if changeid == '':
426 if changeid == '':
427 changeid = '.'
427 changeid = '.'
428 self._repo = repo
428 self._repo = repo
429
429
430 try:
430 try:
431 if isinstance(changeid, int):
431 if isinstance(changeid, int):
432 self._node = repo.changelog.node(changeid)
432 self._node = repo.changelog.node(changeid)
433 self._rev = changeid
433 self._rev = changeid
434 return
434 return
435 if not pycompat.ispy3 and isinstance(changeid, long):
435 if not pycompat.ispy3 and isinstance(changeid, long):
436 changeid = "%d" % changeid
436 changeid = "%d" % changeid
437 if changeid == 'null':
437 if changeid == 'null':
438 self._node = nullid
438 self._node = nullid
439 self._rev = nullrev
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

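    # Usage sketch (hypothetical; assumes `repo` is an existing local
    # repository object): the fallbacks above mean every one of these
    # forms can construct a changectx.
    #
    #   repo['null']      # the null revision
    #   repo['tip']       # the current tip
    #   repo['.']         # the working directory parent
    #   repo[0]           # a local revision number
    #   repo['a1b2c3d4']  # a (partial) hex node, bookmark, tag or branch
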
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

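    # Usage sketch (hypothetical `repo`): children(), ancestors() and
    # descendants() make the common DAG walks one-liners.
    #
    #   ctx = repo['tip']
    #   kids = ctx.children()                      # immediate children only
    #   revs = [a.rev() for a in ctx.ancestors()]  # full ancestor walk
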
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

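    # Configuration sketch (hypothetical hgrc fragment): when several
    # greatest common ancestors exist, the first revision listed in
    # merge.preferancestor that is among the candidates wins.
    #
    #   [merge]
    #   preferancestor = e1f2a3b4 deadbeef
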
    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

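    # Usage sketch (hypothetical; assumes a matcher object `m` built
    # elsewhere, e.g. by the match module): walk() yields only manifest
    # file names, routing everything else through m.bad().
    #
    #   for fn in repo['tip'].walk(m):
    #       ...  # each fn exists in the revision's manifest
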
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

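    # Note the inverted convention relative to __eq__: cmp() returns True
    # when the contents differ. Sketch (hypothetical fctx1/fctx2):
    #
    #   if fctx1.cmp(fctx2):
    #       ...  # contents differ; the cheap size check was not enough
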
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a
            # result. But if the manifest uses a buggy file revision (not a
            # child of the one it replaces) we could. Such a buggy situation
            # will likely result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

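    # Sketch of the distinction (hypothetical `repo`): when several
    # changesets reuse the same file revision, linkrev() names whichever
    # changeset recorded it first, possibly on an unrelated branch, while
    # introrev() stays within the ancestry of this filectx's changeset.
    #
    #   fctx = repo['tip']['README']
    #   fctx.linkrev()   # may point outside tip's ancestry
    #   fctx.introrev()  # guaranteed to be tip or one of its ancestors
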
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out in the list
            # comprehension above, inserting at index 0 always amounts to
            # replacing the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of (annotateline, line) tuples for each line in the
        file, where annotateline.fctx is the filectx of the node where that
        line was last changed; if the linenumber parameter is true,
        annotateline.lineno is the line number at its first appearance in the
        managed file, otherwise lineno has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        lineattrs, text = hist[base]
        return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))

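    # Usage sketch (hypothetical `repo`): annotate() pairs every line with
    # an annotateline describing where it came from.
    #
    #   fctx = repo['tip']['README']
    #   for meta, line in fctx.annotate(follow=True, linenumber=True):
    #       print(meta.fctx.rev(), meta.lineno, line)
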
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

@attr.s(slots=True, frozen=True)
class annotateline(object):
    fctx = attr.ib()
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)

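# Because annotateline is a frozen attrs class, per-line updates go through
# attr.evolve(), which copies an instance with selected fields replaced.
# Sketch (hypothetical `some_fctx`):
#
#   line = annotateline(fctx=some_fctx)
#   skipped = attr.evolve(line, skip=True)  # new object; `line` is unchanged
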
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched.
        # Reversing pblocks maintains the bias towards p2, matching the
        # behavior above.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In such a case we
            # fall back to creating a `changectx` on the unfiltered version
            # of the repository. This fallback should not be an issue because
            # `changectx` objects from `filectx` are not used in complex
            # operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than a crash.
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when fixes for the linkrev issues are on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

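    # Configuration sketch (hgrc fragment): with the policy below, data()
    # returns an empty string for censored nodes instead of aborting.
    #
    #   [censor]
    #   policy = ignore
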
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

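    # Usage sketch (hypothetical `fctx`): renamed()/_copied yield either a
    # false value or a (source path, source filenode) pair.
    #
    #   r = fctx.renamed()
    #   if r:
    #       srcpath, srcfilenode = r
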
    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

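    # The three-way merge of flags above follows the usual rule: if one side
    # left the flag untouched relative to the ancestor, the other side's
    # change wins; genuine conflicts punt to ''. With func(f) reduced to a
    # table over (fl1, fl2, fla):
    #
    #   ('x', 'x', '')  -> 'x'   # both sides agree
    #   ('x', '',  '')  -> 'x'   # only p1 changed the flag
    #   ('',  'l', 'x') -> ''    # conflicting changes: punt
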
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

1401
1401
1402 def isinmemory(self):
1402 def isinmemory(self):
1403 return False
1403 return False
1404
1404
1405 def tags(self):
1405 def tags(self):
1406 return []
1406 return []
1407
1407
1408 def bookmarks(self):
1408 def bookmarks(self):
1409 b = []
1409 b = []
1410 for p in self.parents():
1410 for p in self.parents():
1411 b.extend(p.bookmarks())
1411 b.extend(p.bookmarks())
1412 return b
1412 return b
1413
1413
1414 def phase(self):
1414 def phase(self):
1415 phase = phases.draft # default phase to draft
1415 phase = phases.draft # default phase to draft
1416 for p in self.parents():
1416 for p in self.parents():
1417 phase = max(phase, p.phase())
1417 phase = max(phase, p.phase())
1418 return phase
1418 return phase
1419
1419
1420 def hidden(self):
1420 def hidden(self):
1421 return False
1421 return False
1422
1422
1423 def children(self):
1423 def children(self):
1424 return []
1424 return []
1425
1425
1426 def flags(self, path):
1426 def flags(self, path):
1427 if r'_manifest' in self.__dict__:
1427 if r'_manifest' in self.__dict__:
1428 try:
1428 try:
1429 return self._manifest.flags(path)
1429 return self._manifest.flags(path)
1430 except KeyError:
1430 except KeyError:
1431 return ''
1431 return ''
1432
1432
1433 try:
1433 try:
1434 return self._flagfunc(path)
1434 return self._flagfunc(path)
1435 except OSError:
1435 except OSError:
1436 return ''
1436 return ''
1437
1437
1438 def ancestor(self, c2):
1438 def ancestor(self, c2):
1439 """return the "best" ancestor context of self and c2"""
1439 """return the "best" ancestor context of self and c2"""
1440 return self._parents[0].ancestor(c2) # punt on two parents for now
1440 return self._parents[0].ancestor(c2) # punt on two parents for now
1441
1441
1442 def walk(self, match):
1442 def walk(self, match):
1443 '''Generates matching file names.'''
1443 '''Generates matching file names.'''
1444 return sorted(self._repo.dirstate.walk(match,
1444 return sorted(self._repo.dirstate.walk(match,
1445 subrepos=sorted(self.substate),
1445 subrepos=sorted(self.substate),
1446 unknown=True, ignored=False))
1446 unknown=True, ignored=False))
1447
1447
1448 def matches(self, match):
1448 def matches(self, match):
1449 return sorted(self._repo.dirstate.matches(match))
1449 return sorted(self._repo.dirstate.matches(match))
1450
1450
1451 def ancestors(self):
1451 def ancestors(self):
1452 for p in self._parents:
1452 for p in self._parents:
1453 yield p
1453 yield p
1454 for a in self._repo.changelog.ancestors(
1454 for a in self._repo.changelog.ancestors(
1455 [p.rev() for p in self._parents]):
1455 [p.rev() for p in self._parents]):
1456 yield changectx(self._repo, a)
1456 yield changectx(self._repo, a)
1457
1457
1458 def markcommitted(self, node):
1458 def markcommitted(self, node):
1459 """Perform post-commit cleanup necessary after committing this ctx
1459 """Perform post-commit cleanup necessary after committing this ctx
1460
1460
1461 Specifically, this updates backing stores this working context
1461 Specifically, this updates backing stores this working context
1462 wraps to reflect the fact that the changes reflected by this
1462 wraps to reflect the fact that the changes reflected by this
1463 workingctx have been committed. For example, it marks
1463 workingctx have been committed. For example, it marks
1464 modified and added files as normal in the dirstate.
1464 modified and added files as normal in the dirstate.
1465
1465
1466 """
1466 """
1467
1467
1468 with self._repo.dirstate.parentchange():
1468 with self._repo.dirstate.parentchange():
1469 for f in self.modified() + self.added():
1469 for f in self.modified() + self.added():
1470 self._repo.dirstate.normal(f)
1470 self._repo.dirstate.normal(f)
1471 for f in self.removed():
1471 for f in self.removed():
1472 self._repo.dirstate.drop(f)
1472 self._repo.dirstate.drop(f)
1473 self._repo.dirstate.setparents(node)
1473 self._repo.dirstate.setparents(node)
1474
1474
1475 # write changes out explicitly, because nesting wlock at
1475 # write changes out explicitly, because nesting wlock at
1476 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1476 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1477 # from immediately doing so for subsequent changing files
1477 # from immediately doing so for subsequent changing files
1478 self._repo.dirstate.write(self._repo.currenttransaction())
1478 self._repo.dirstate.write(self._repo.currenttransaction())
1479
1479
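    # Illustrative sketch, not part of the original module: for a
    # hypothetical commit touching two files, the transitions above are:
    #
    #   ctx.markcommitted(newnode)
    #   # 'changed.txt' (modified/added) -> dirstate state 'n' (normal)
    #   # 'gone.txt'    (removed)        -> dropped from the dirstate
    #   # dirstate parent                -> newnode
    #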
    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

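    # Illustrative note, not part of the original module: dirty() reports a
    # working directory as modified for any of an uncommitted merge (p2 is
    # set), a branch name differing from p1's, or pending file changes;
    # deleted-but-tracked files count only when missing=True. Hypothetically:
    #
    #   repo[None].dirty()              # False on a clean checkout
    #   repo[None].dirty(missing=True)  # True if a tracked file was rm'd
    #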
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

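    # Worked example, not part of the original module: the RAM warning above
    # estimates 3x the file size, truncated to whole megabytes. For a 10 MB
    # file:
    #
    #   st_size = 10000000
    #   3 * st_size // 1000000   # -> 30: "up to 30 MB of RAM may be required"
    #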
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

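    # Illustrative sketch, not part of the original module: on a filesystem
    # without symlink support, a "symlink" is checked out as a small
    # placeholder file holding the link target, so the filter above keeps
    # only contents that could plausibly be a target:
    #
    #   'target/path'          # short, single line: kept
    #   'line one\nline two'   # contains a newline: filtered out
    #   'x' * 2048             # 1024 bytes or more: filtered out
    #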
    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

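    # Note summarizing the logic above: a file reaches _checklookup when its
    # size and timestamp alone cannot prove it clean, e.g. it was written in
    # the same second the dirstate was last updated. The full content compare
    # then sorts it into `modified`, `deleted` (became unreadable), or
    # `fixup` (actually clean; see _poststatusfixup below).
    #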
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we use special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
-           return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
+           return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

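    # The -/+ pair above is this commit's cleanup: indexing a stat result
    # with stat.ST_MTIME yields an integer timestamp, while the st_mtime
    # attribute may be a float. A standalone comparison (hypothetical path):
    #
    #   import os, stat
    #   st = os.lstat('some/file')
    #   st.st_mtime          # e.g. 1520012345.678901 (float)
    #   st[stat.ST_MTIME]    # e.g. 1520012345 (int)
    #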
    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If ``exists`` is True, ``flags`` and ``date`` must be non-None. If it is
    False, the file was deleted.
    """

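    # Illustrative example, not part of the original module: one cache entry
    # as described by the class docstring -- a dirty, executable file:
    #
    #   self._cache['bin/run'] = {
    #       'exists': True,
    #       'data': '#!/bin/sh\necho hi\n',
    #       'date': (1520012345, 0),
    #       'flags': 'x',
    #       'copied': None,
    #   }
    #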
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

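    # Illustrative sketch, not part of the original module: two hypothetical
    # writes that _auditconflicts above would abort on:
    #
    #   ctx.write('a/foo', data)  # 'a' is a file in p1 -> "conflicts with file"
    #   ctx.write('a', data)      # p1 tracks 'a/b'     -> "'a/' is a folder"
    #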
    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             self._path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

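    # A minimal usage sketch, assuming a loaded `repo` object (this mirrors
    # the rebase --collapse use case mentioned in setbase above, but is not
    # taken from the original module):
    #
    #   wctx = overlayworkingctx(repo)
    #   wctx.setbase(repo['.'])
    #   wctx.write('a.txt', 'new contents\n')
    #   mctx = wctx.tomemctx('in-memory commit')
    #   # mctx can be committed without touching the on-disk working copy
    #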
2192 def isdirty(self, path):
2192 def isdirty(self, path):
2193 return path in self._cache
2193 return path in self._cache
2194
2194
2195 def isempty(self):
2195 def isempty(self):
2196 # We need to discard any keys that are actually clean before the empty
2196 # We need to discard any keys that are actually clean before the empty
2197 # commit check.
2197 # commit check.
2198 self._compact()
2198 self._compact()
2199 return len(self._cache) == 0
2199 return len(self._cache) == 0
2200
2200
2201 def clean(self):
2201 def clean(self):
2202 self._cache = {}
2202 self._cache = {}
2203
2203
2204 def _compact(self):
2204 def _compact(self):
2205 """Removes keys from the cache that are actually clean, by comparing
2205 """Removes keys from the cache that are actually clean, by comparing
2206 them with the underlying context.
2206 them with the underlying context.
2207
2207
2208 This can occur during the merge process, e.g. by passing --tool :local
2208 This can occur during the merge process, e.g. by passing --tool :local
2209 to resolve a conflict.
2209 to resolve a conflict.
2210 """
2210 """
2211 keys = []
2211 keys = []
2212 for path in self._cache.keys():
2212 for path in self._cache.keys():
2213 cache = self._cache[path]
2213 cache = self._cache[path]
2214 try:
2214 try:
2215 underlying = self._wrappedctx[path]
2215 underlying = self._wrappedctx[path]
2216 if (underlying.data() == cache['data'] and
2216 if (underlying.data() == cache['data'] and
2217 underlying.flags() == cache['flags']):
2217 underlying.flags() == cache['flags']):
2218 keys.append(path)
2218 keys.append(path)
2219 except error.ManifestLookupError:
2219 except error.ManifestLookupError:
2220 # Path not in the underlying manifest (created).
2220 # Path not in the underlying manifest (created).
2221 continue
2221 continue
2222
2222
2223 for path in keys:
2223 for path in keys:
2224 del self._cache[path]
2224 del self._cache[path]
2225 return keys
2225 return keys
2226
2226
2227 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2227 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2228 self._cache[path] = {
2228 self._cache[path] = {
2229 'exists': exists,
2229 'exists': exists,
2230 'data': data,
2230 'data': data,
2231 'date': date,
2231 'date': date,
2232 'flags': flags,
2232 'flags': flags,
2233 'copied': None,
2233 'copied': None,
2234 }
2234 }
2235
2235
2236 def filectx(self, path, filelog=None):
2236 def filectx(self, path, filelog=None):
2237 return overlayworkingfilectx(self._repo, path, parent=self,
2237 return overlayworkingfilectx(self._repo, path, parent=self,
2238 filelog=filelog)
2238 filelog=filelog)
2239
2239
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
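
# A minimal usage sketch (an assumption for illustration, not part of the
# original module): wrapping a filectxfn whose per-path work is expensive so
# that repeated lookups of the same path are served from the cache.
# 'expensivefctxfn' and 'expensiveread' are hypothetical names.
#
#   def expensivefctxfn(repo, memctx, path):
#       return memfilectx(repo, memctx, path, expensiveread(path))
#
#   cachedfn = makecachingfilectxfn(expensivefctxfn)
#   fctx1 = cachedfn(repo, memctx, b'a.txt')   # computed once
#   fctx2 = cachedfn(repo, memctx, b'a.txt')   # served from the cache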

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
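
# Note: callers rarely invoke memfilefromctx()/memfilefrompatch() directly;
# memctx.__init__ below wraps a non-callable filectxfn automatically. A
# minimal sketch (an assumption, not original code) passing a context in
# place of a callable:
#
#   ctx = memctx(repo, parents, text, files, repo['.'])
#   # repo['.'] is wrapped via memfilefromctx() inside memctx.__init__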

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
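
# A usage sketch (an assumption, not part of the original module): committing
# a single file entirely in memory. 'repo' is an open localrepo; the path,
# contents, and user below are hypothetical.
#
#   def getfilectx(repo, memctx, path):
#       if path == b'a.txt':
#           return memfilectx(repo, memctx, path, b'new contents\n')
#       return None   # treat anything else as removed
#
#   ctx = memctx(repo, [repo['.'].node(), None], b'example commit',
#                [b'a.txt'], getfilectx, user=b'example user')
#   node = ctx.commit()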

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function so that the data can be computed lazily. path, flags, copied,
        ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusability here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()
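
# A usage sketch (an assumption, not part of the original module): renaming a
# file while reusing the possibly expensive raw data of an existing filectx;
# 'fctx' and 'newctx' are hypothetical.
#
#   newfctx = overlayfilectx(fctx, path=b'renamed.txt',
#                            copied=b'original.txt', ctx=newctx)
#   assert newfctx.data() == fctx.data()   # data is forwarded lazily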

class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
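
# A usage sketch (an assumption, not part of the original module): rewriting
# only the commit message of the working directory parent, reusing its
# manifest unchanged.
#
#   origctx = repo['.']
#   newctx = metadataonlyctx(repo, origctx, text=b'reworded message')
#   newnode = newctx.commit()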

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
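
# A usage sketch (an assumption, not part of the original module): comparing
# a scratch file on disk against a file in the working directory.
#
#   afctx = arbitraryfilectx(b'/tmp/scratch.txt', repo=repo)
#   if afctx.cmp(repo[None][b'a.txt']):
#       pass   # contents differ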
@@ -1,2844 +1,2845 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import codecs
import collections
import difflib
import errno
import operator
import os
import random
import socket
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    context,
    dagparser,
    dagutil,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    fileset,
    formatter,
    hg,
    localrepo,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    obsolete,
    obsutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    smartset,
    sshpeer,
    sslutil,
    streamclone,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoserver,
)
from .utils import dateutil
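
# Note: 'stat' is imported for its constants (e.g. stat.ST_MTIME). As a point
# of Python behavior (illustration only, not code from this module): indexing
# a stat result yields integers, while the attribute form is a float.
#
#   st = os.stat('.')
#   st[stat.ST_MTIME]   # integer seconds since the epoch
#   st.st_mtime         # float, may carry sub-second noise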

release = lockmod.release

command = registrar.command()

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))

@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """
152
153
153 if text is None:
154 if text is None:
154 ui.status(_("reading DAG from stdin\n"))
155 ui.status(_("reading DAG from stdin\n"))
155 text = ui.fin.read()
156 text = ui.fin.read()
156
157
157 cl = repo.changelog
158 cl = repo.changelog
158 if len(cl) > 0:
159 if len(cl) > 0:
159 raise error.Abort(_('repository is not empty'))
160 raise error.Abort(_('repository is not empty'))
160
161
161 # determine number of revs in DAG
162 # determine number of revs in DAG
162 total = 0
163 total = 0
163 for type, data in dagparser.parsedag(text):
164 for type, data in dagparser.parsedag(text):
164 if type == 'n':
165 if type == 'n':
165 total += 1
166 total += 1
166
167
167 if mergeable_file:
168 if mergeable_file:
168 linesperrev = 2
169 linesperrev = 2
169 # make a file with k lines per rev
170 # make a file with k lines per rev
170 initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
171 initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
171 initialmergedlines.append("")
172 initialmergedlines.append("")
172
173
173 tags = []
174 tags = []
174
175
175 wlock = lock = tr = None
176 wlock = lock = tr = None
176 try:
177 try:
177 wlock = repo.wlock()
178 wlock = repo.wlock()
178 lock = repo.lock()
179 lock = repo.lock()
179 tr = repo.transaction("builddag")
180 tr = repo.transaction("builddag")
180
181
181 at = -1
182 at = -1
182 atbranch = 'default'
183 atbranch = 'default'
183 nodeids = []
184 nodeids = []
184 id = 0
185 id = 0
185 ui.progress(_('building'), id, unit=_('revisions'), total=total)
186 ui.progress(_('building'), id, unit=_('revisions'), total=total)
186 for type, data in dagparser.parsedag(text):
187 for type, data in dagparser.parsedag(text):
187 if type == 'n':
188 if type == 'n':
188 ui.note(('node %s\n' % pycompat.bytestr(data)))
189 ui.note(('node %s\n' % pycompat.bytestr(data)))
189 id, ps = data
190 id, ps = data
190
191
191 files = []
192 files = []
192 filecontent = {}
193 filecontent = {}
193
194
194 p2 = None
195 p2 = None
195 if mergeable_file:
196 if mergeable_file:
196 fn = "mf"
197 fn = "mf"
197 p1 = repo[ps[0]]
198 p1 = repo[ps[0]]
198 if len(ps) > 1:
199 if len(ps) > 1:
199 p2 = repo[ps[1]]
200 p2 = repo[ps[1]]
200 pa = p1.ancestor(p2)
201 pa = p1.ancestor(p2)
201 base, local, other = [x[fn].data() for x in (pa, p1,
202 base, local, other = [x[fn].data() for x in (pa, p1,
202 p2)]
203 p2)]
203 m3 = simplemerge.Merge3Text(base, local, other)
204 m3 = simplemerge.Merge3Text(base, local, other)
204 ml = [l.strip() for l in m3.merge_lines()]
205 ml = [l.strip() for l in m3.merge_lines()]
205 ml.append("")
206 ml.append("")
206 elif at > 0:
207 elif at > 0:
207 ml = p1[fn].data().split("\n")
208 ml = p1[fn].data().split("\n")
208 else:
209 else:
209 ml = initialmergedlines
210 ml = initialmergedlines
210 ml[id * linesperrev] += " r%i" % id
211 ml[id * linesperrev] += " r%i" % id
211 mergedtext = "\n".join(ml)
212 mergedtext = "\n".join(ml)
212 files.append(fn)
213 files.append(fn)
213 filecontent[fn] = mergedtext
214 filecontent[fn] = mergedtext
214
215
215 if overwritten_file:
216 if overwritten_file:
216 fn = "of"
217 fn = "of"
217 files.append(fn)
218 files.append(fn)
218 filecontent[fn] = "r%i\n" % id
219 filecontent[fn] = "r%i\n" % id
219
220
220 if new_file:
221 if new_file:
221 fn = "nf%i" % id
222 fn = "nf%i" % id
222 files.append(fn)
223 files.append(fn)
223 filecontent[fn] = "r%i\n" % id
224 filecontent[fn] = "r%i\n" % id
224 if len(ps) > 1:
225 if len(ps) > 1:
225 if not p2:
226 if not p2:
226 p2 = repo[ps[1]]
227 p2 = repo[ps[1]]
227 for fn in p2:
228 for fn in p2:
228 if fn.startswith("nf"):
229 if fn.startswith("nf"):
229 files.append(fn)
230 files.append(fn)
230 filecontent[fn] = p2[fn].data()
231 filecontent[fn] = p2[fn].data()
231
232
232 def fctxfn(repo, cx, path):
233 def fctxfn(repo, cx, path):
233 if path in filecontent:
234 if path in filecontent:
234 return context.memfilectx(repo, cx, path,
235 return context.memfilectx(repo, cx, path,
235 filecontent[path])
236 filecontent[path])
236 return None
237 return None
237
238
238 if len(ps) == 0 or ps[0] < 0:
239 if len(ps) == 0 or ps[0] < 0:
239 pars = [None, None]
240 pars = [None, None]
240 elif len(ps) == 1:
241 elif len(ps) == 1:
241 pars = [nodeids[ps[0]], None]
242 pars = [nodeids[ps[0]], None]
242 else:
243 else:
243 pars = [nodeids[p] for p in ps]
244 pars = [nodeids[p] for p in ps]
244 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
245 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
245 date=(id, 0),
246 date=(id, 0),
246 user="debugbuilddag",
247 user="debugbuilddag",
247 extra={'branch': atbranch})
248 extra={'branch': atbranch})
248 nodeid = repo.commitctx(cx)
249 nodeid = repo.commitctx(cx)
249 nodeids.append(nodeid)
250 nodeids.append(nodeid)
250 at = id
251 at = id
251 elif type == 'l':
252 elif type == 'l':
252 id, name = data
253 id, name = data
253 ui.note(('tag %s\n' % name))
254 ui.note(('tag %s\n' % name))
254 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
255 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
255 elif type == 'a':
256 elif type == 'a':
256 ui.note(('branch %s\n' % data))
257 ui.note(('branch %s\n' % data))
257 atbranch = data
258 atbranch = data
258 ui.progress(_('building'), id, unit=_('revisions'), total=total)
259 ui.progress(_('building'), id, unit=_('revisions'), total=total)
259 tr.close()
260 tr.close()
260
261
261 if tags:
262 if tags:
262 repo.vfs.write("localtags", "".join(tags))
263 repo.vfs.write("localtags", "".join(tags))
263 finally:
264 finally:
264 ui.progress(_('building'), None)
265 ui.progress(_('building'), None)
265 release(tr, lock, wlock)
266 release(tr, lock, wlock)
266
267
267 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
268 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
268 indent_string = ' ' * indent
269 indent_string = ' ' * indent
269 if all:
270 if all:
270 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
271 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
271 % indent_string)
272 % indent_string)
272
273
273 def showchunks(named):
274 def showchunks(named):
274 ui.write("\n%s%s\n" % (indent_string, named))
275 ui.write("\n%s%s\n" % (indent_string, named))
275 for deltadata in gen.deltaiter():
276 for deltadata in gen.deltaiter():
276 node, p1, p2, cs, deltabase, delta, flags = deltadata
277 node, p1, p2, cs, deltabase, delta, flags = deltadata
277 ui.write("%s%s %s %s %s %s %d\n" %
278 ui.write("%s%s %s %s %s %s %d\n" %
278 (indent_string, hex(node), hex(p1), hex(p2),
279 (indent_string, hex(node), hex(p1), hex(p2),
279 hex(cs), hex(deltabase), len(delta)))
280 hex(cs), hex(deltabase), len(delta)))
280
281
281 chunkdata = gen.changelogheader()
282 chunkdata = gen.changelogheader()
282 showchunks("changelog")
283 showchunks("changelog")
283 chunkdata = gen.manifestheader()
284 chunkdata = gen.manifestheader()
284 showchunks("manifest")
285 showchunks("manifest")
285 for chunkdata in iter(gen.filelogheader, {}):
286 for chunkdata in iter(gen.filelogheader, {}):
286 fname = chunkdata['filename']
287 fname = chunkdata['filename']
287 showchunks(fname)
288 showchunks(fname)
288 else:
289 else:
289 if isinstance(gen, bundle2.unbundle20):
290 if isinstance(gen, bundle2.unbundle20):
290 raise error.Abort(_('use debugbundle2 for this file'))
291 raise error.Abort(_('use debugbundle2 for this file'))
291 chunkdata = gen.changelogheader()
292 chunkdata = gen.changelogheader()
292 for deltadata in gen.deltaiter():
293 for deltadata in gen.deltaiter():
293 node, p1, p2, cs, deltabase, delta, flags = deltadata
294 node, p1, p2, cs, deltabase, delta, flags = deltadata
294 ui.write("%s%s\n" % (indent_string, hex(node)))
295 ui.write("%s%s\n" % (indent_string, hex(node)))
295
296
296 def _debugobsmarkers(ui, part, indent=0, **opts):
297 def _debugobsmarkers(ui, part, indent=0, **opts):
297 """display version and markers contained in 'data'"""
298 """display version and markers contained in 'data'"""
298 opts = pycompat.byteskwargs(opts)
299 opts = pycompat.byteskwargs(opts)
299 data = part.read()
300 data = part.read()
300 indent_string = ' ' * indent
301 indent_string = ' ' * indent
301 try:
302 try:
302 version, markers = obsolete._readmarkers(data)
303 version, markers = obsolete._readmarkers(data)
303 except error.UnknownVersion as exc:
304 except error.UnknownVersion as exc:
304 msg = "%sunsupported version: %s (%d bytes)\n"
305 msg = "%sunsupported version: %s (%d bytes)\n"
305 msg %= indent_string, exc.version, len(data)
306 msg %= indent_string, exc.version, len(data)
306 ui.write(msg)
307 ui.write(msg)
307 else:
308 else:
308 msg = "%sversion: %d (%d bytes)\n"
309 msg = "%sversion: %d (%d bytes)\n"
309 msg %= indent_string, version, len(data)
310 msg %= indent_string, version, len(data)
310 ui.write(msg)
311 ui.write(msg)
311 fm = ui.formatter('debugobsolete', opts)
312 fm = ui.formatter('debugobsolete', opts)
312 for rawmarker in sorted(markers):
313 for rawmarker in sorted(markers):
313 m = obsutil.marker(None, rawmarker)
314 m = obsutil.marker(None, rawmarker)
314 fm.startitem()
315 fm.startitem()
315 fm.plain(indent_string)
316 fm.plain(indent_string)
316 cmdutil.showmarker(fm, m)
317 cmdutil.showmarker(fm, m)
317 fm.end()
318 fm.end()
318
319
319 def _debugphaseheads(ui, data, indent=0):
320 def _debugphaseheads(ui, data, indent=0):
320 """display version and markers contained in 'data'"""
321 """display version and markers contained in 'data'"""
321 indent_string = ' ' * indent
322 indent_string = ' ' * indent
322 headsbyphase = phases.binarydecode(data)
323 headsbyphase = phases.binarydecode(data)
323 for phase in phases.allphases:
324 for phase in phases.allphases:
324 for head in headsbyphase[phase]:
325 for head in headsbyphase[phase]:
325 ui.write(indent_string)
326 ui.write(indent_string)
326 ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
327 ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
327
328
328 def _quasirepr(thing):
329 def _quasirepr(thing):
329 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
330 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
330 return '{%s}' % (
331 return '{%s}' % (
331 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
332 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
332 return pycompat.bytestr(repr(thing))
333 return pycompat.bytestr(repr(thing))
333
334
334 def _debugbundle2(ui, gen, all=None, **opts):
335 def _debugbundle2(ui, gen, all=None, **opts):
335 """lists the contents of a bundle2"""
336 """lists the contents of a bundle2"""
336 if not isinstance(gen, bundle2.unbundle20):
337 if not isinstance(gen, bundle2.unbundle20):
337 raise error.Abort(_('not a bundle2 file'))
338 raise error.Abort(_('not a bundle2 file'))
338 ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
339 ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
339 parttypes = opts.get(r'part_type', [])
340 parttypes = opts.get(r'part_type', [])
340 for part in gen.iterparts():
341 for part in gen.iterparts():
341 if parttypes and part.type not in parttypes:
342 if parttypes and part.type not in parttypes:
342 continue
343 continue
343 ui.write('%s -- %s\n' % (part.type, _quasirepr(part.params)))
344 ui.write('%s -- %s\n' % (part.type, _quasirepr(part.params)))
344 if part.type == 'changegroup':
345 if part.type == 'changegroup':
345 version = part.params.get('version', '01')
346 version = part.params.get('version', '01')
346 cg = changegroup.getunbundler(version, part, 'UN')
347 cg = changegroup.getunbundler(version, part, 'UN')
347 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
348 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
348 if part.type == 'obsmarkers':
349 if part.type == 'obsmarkers':
349 _debugobsmarkers(ui, part, indent=4, **opts)
350 _debugobsmarkers(ui, part, indent=4, **opts)
350 if part.type == 'phase-heads':
351 if part.type == 'phase-heads':
351 _debugphaseheads(ui, part, indent=4)
352 _debugphaseheads(ui, part, indent=4)
352
353
353 @command('debugbundle',
354 @command('debugbundle',
354 [('a', 'all', None, _('show all details')),
355 [('a', 'all', None, _('show all details')),
355 ('', 'part-type', [], _('show only the named part type')),
356 ('', 'part-type', [], _('show only the named part type')),
356 ('', 'spec', None, _('print the bundlespec of the bundle'))],
357 ('', 'spec', None, _('print the bundlespec of the bundle'))],
357 _('FILE'),
358 _('FILE'),
358 norepo=True)
359 norepo=True)
359 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
360 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
360 """lists the contents of a bundle"""
361 """lists the contents of a bundle"""
361 with hg.openpath(ui, bundlepath) as f:
362 with hg.openpath(ui, bundlepath) as f:
362 if spec:
363 if spec:
363 spec = exchange.getbundlespec(ui, f)
364 spec = exchange.getbundlespec(ui, f)
364 ui.write('%s\n' % spec)
365 ui.write('%s\n' % spec)
365 return
366 return
366
367
367 gen = exchange.readbundle(ui, f, bundlepath)
368 gen = exchange.readbundle(ui, f, bundlepath)
368 if isinstance(gen, bundle2.unbundle20):
369 if isinstance(gen, bundle2.unbundle20):
369 return _debugbundle2(ui, gen, all=all, **opts)
370 return _debugbundle2(ui, gen, all=all, **opts)
370 _debugchangegroup(ui, gen, all=all, **opts)
371 _debugchangegroup(ui, gen, all=all, **opts)
371
372
372 @command('debugcapabilities',
373 @command('debugcapabilities',
373 [], _('PATH'),
374 [], _('PATH'),
374 norepo=True)
375 norepo=True)
375 def debugcapabilities(ui, path, **opts):
376 def debugcapabilities(ui, path, **opts):
376 """lists the capabilities of a remote peer"""
377 """lists the capabilities of a remote peer"""
377 opts = pycompat.byteskwargs(opts)
378 opts = pycompat.byteskwargs(opts)
378 peer = hg.peer(ui, opts, path)
379 peer = hg.peer(ui, opts, path)
379 caps = peer.capabilities()
380 caps = peer.capabilities()
380 ui.write(('Main capabilities:\n'))
381 ui.write(('Main capabilities:\n'))
381 for c in sorted(caps):
382 for c in sorted(caps):
382 ui.write((' %s\n') % c)
383 ui.write((' %s\n') % c)
383 b2caps = bundle2.bundle2caps(peer)
384 b2caps = bundle2.bundle2caps(peer)
384 if b2caps:
385 if b2caps:
385 ui.write(('Bundle2 capabilities:\n'))
386 ui.write(('Bundle2 capabilities:\n'))
386 for key, values in sorted(b2caps.iteritems()):
387 for key, values in sorted(b2caps.iteritems()):
387 ui.write((' %s\n') % key)
388 ui.write((' %s\n') % key)
388 for v in values:
389 for v in values:
389 ui.write((' %s\n') % v)
390 ui.write((' %s\n') % v)
390
391
391 @command('debugcheckstate', [], '')
392 @command('debugcheckstate', [], '')
392 def debugcheckstate(ui, repo):
393 def debugcheckstate(ui, repo):
393 """validate the correctness of the current dirstate"""
394 """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available colors, effects or styles"""
    ui.write(('color mode: %s\n') % ui._colormode)
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)

def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort labels containing '_' after the others to group the '_background'
    # entries together
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)

def _debugdisplaystyle(ui):
    ui.write(_('available style:\n'))
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            # pad so the effect lists line up in a column
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')

@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))

@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")

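# A hypothetical invocation; see dagparser.py for the emitted text notation:
#
#   $ hg debugdag -t -b        # changelog DAG with tag and branch annotations
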
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

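# Illustrative calls (revision numbers and file name are examples only):
#
#   $ hg debugdata -c 0        # raw changelog entry for revision 0
#   $ hg debugdata -m 0        # raw manifest data for revision 0
#   $ hg debugdata README 2    # revision 2 from README's filelog
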
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        d = dateutil.parsedate(date, util.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % d)
    ui.write(("standard: %s\n") % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))

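# Example; the internal form is (unixtime, offset-from-UTC in seconds):
#
#   $ hg debugdate '2006-02-01 13:00:30 -0500'
#   internal: 1138816830 18000
#   standard: Wed Feb 01 13:00:30 2006 -0500
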
@command('debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                    (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                    how much unrelated data is needed to load this delta chain

    If the repository is configured to use sparse reads, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:     in how many data hunks the whole revision would be read

    Sparse reads can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        start = r.start
        length = r.length
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in revlog._slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            readdensity = float(chainsize) / float(readsize)

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()

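# Output goes through the formatter, so the keywords documented above can be
# used with -T, e.g.:
#
#   $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
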
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get(r'nodates')
    datesort = opts.get(r'datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], _('restrict discovery to this set of revs')),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)

_chunksize = 4 << 10

@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()

@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()

@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
    if ui.verbose:
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)

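# Example fileset queries (see 'hg help filesets' for the full language):
#
#   $ hg debugfileset 'modified() and size(">10k")'
#   $ hg debugfileset -r '.^' 'binary()'
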
@command('debugformat',
    [] + cmdutil.formatteropts,
    _(''))
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()

@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)

@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)

@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows whether each file is ignored and,
    if so, the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))

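# Illustrative session (the pattern and file names are invented):
#
#   $ hg debugignore build/output.o
#   build/output.o is ignored
#   (ignore rule in .hgignore, line 3: 'build')
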
@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], shortfn(node)))

@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

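# The output is plain Graphviz dot text, so it can be rendered directly:
#
#   $ hg debugindexdot -c | dot -Tpng -o changelog-dag.png
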
1111 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
1112 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
1112 def debuginstall(ui, **opts):
1113 def debuginstall(ui, **opts):
1113 '''test Mercurial installation
1114 '''test Mercurial installation
1114
1115
1115 Returns 0 on success.
1116 Returns 0 on success.
1116 '''
1117 '''
1117 opts = pycompat.byteskwargs(opts)
1118 opts = pycompat.byteskwargs(opts)
1118
1119
1119 def writetemp(contents):
1120 def writetemp(contents):
1120 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
1121 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
1121 f = os.fdopen(fd, pycompat.sysstr("wb"))
1122 f = os.fdopen(fd, pycompat.sysstr("wb"))
1122 f.write(contents)
1123 f.write(contents)
1123 f.close()
1124 f.close()
1124 return name
1125 return name
1125
1126
1126 problems = 0
1127 problems = 0
1127
1128
1128 fm = ui.formatter('debuginstall', opts)
1129 fm = ui.formatter('debuginstall', opts)
1129 fm.startitem()
1130 fm.startitem()
1130
1131
1131 # encoding
1132 # encoding
1132 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
1133 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
1133 err = None
1134 err = None
1134 try:
1135 try:
1135 codecs.lookup(pycompat.sysstr(encoding.encoding))
1136 codecs.lookup(pycompat.sysstr(encoding.encoding))
1136 except LookupError as inst:
1137 except LookupError as inst:
1137 err = util.forcebytestr(inst)
1138 err = util.forcebytestr(inst)
1138 problems += 1
1139 problems += 1
1139 fm.condwrite(err, 'encodingerror', _(" %s\n"
1140 fm.condwrite(err, 'encodingerror', _(" %s\n"
1140 " (check that your locale is properly set)\n"), err)
1141 " (check that your locale is properly set)\n"), err)
1141
1142
1142 # Python
1143 # Python
1143 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1144 fm.write('pythonexe', _("checking Python executable (%s)\n"),
1144 pycompat.sysexecutable)
1145 pycompat.sysexecutable)
1145 fm.write('pythonver', _("checking Python version (%s)\n"),
1146 fm.write('pythonver', _("checking Python version (%s)\n"),
1146 ("%d.%d.%d" % sys.version_info[:3]))
1147 ("%d.%d.%d" % sys.version_info[:3]))
1147 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
1148 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
1148 os.path.dirname(pycompat.fsencode(os.__file__)))
1149 os.path.dirname(pycompat.fsencode(os.__file__)))
1149
1150
1150 security = set(sslutil.supportedprotocols)
1151 security = set(sslutil.supportedprotocols)
1151 if sslutil.hassni:
1152 if sslutil.hassni:
1152 security.add('sni')
1153 security.add('sni')
1153
1154
1154 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
1155 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
1155 fm.formatlist(sorted(security), name='protocol',
1156 fm.formatlist(sorted(security), name='protocol',
1156 fmt='%s', sep=','))
1157 fmt='%s', sep=','))
1157
1158
1158 # These are warnings, not errors. So don't increment problem count. This
1159 # These are warnings, not errors. So don't increment problem count. This
1159 # may change in the future.
1160 # may change in the future.
1160 if 'tls1.2' not in security:
1161 if 'tls1.2' not in security:
1161 fm.plain(_(' TLS 1.2 not supported by Python install; '
1162 fm.plain(_(' TLS 1.2 not supported by Python install; '
1162 'network connections lack modern security\n'))
1163 'network connections lack modern security\n'))
1163 if 'sni' not in security:
1164 if 'sni' not in security:
1164 fm.plain(_(' SNI not supported by Python install; may have '
1165 fm.plain(_(' SNI not supported by Python install; may have '
1165 'connectivity issues with some servers\n'))
1166 'connectivity issues with some servers\n'))
1166
1167
1167 # TODO print CA cert info
1168 # TODO print CA cert info
1168
1169
1169 # hg version
1170 # hg version
1170 hgver = util.version()
1171 hgver = util.version()
1171 fm.write('hgver', _("checking Mercurial version (%s)\n"),
1172 fm.write('hgver', _("checking Mercurial version (%s)\n"),
1172 hgver.split('+')[0])
1173 hgver.split('+')[0])
1173 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
1174 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
1174 '+'.join(hgver.split('+')[1:]))
1175 '+'.join(hgver.split('+')[1:]))
1175
1176
1176 # compiled modules
1177 # compiled modules
1177 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
1178 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
1178 policy.policy)
1179 policy.policy)
1179 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1180 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
1180 os.path.dirname(pycompat.fsencode(__file__)))
1181 os.path.dirname(pycompat.fsencode(__file__)))
1181
1182
1182 if policy.policy in ('c', 'allow'):
1183 if policy.policy in ('c', 'allow'):
1183 err = None
1184 err = None
1184 try:
1185 try:
1185 from .cext import (
1186 from .cext import (
1186 base85,
1187 base85,
1187 bdiff,
1188 bdiff,
1188 mpatch,
1189 mpatch,
1189 osutil,
1190 osutil,
1190 )
1191 )
1191 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1192 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
1192 except Exception as inst:
1193 except Exception as inst:
1193 err = util.forcebytestr(inst)
1194 err = util.forcebytestr(inst)
1194 problems += 1
1195 problems += 1
1195 fm.condwrite(err, 'extensionserror', " %s\n", err)
1196 fm.condwrite(err, 'extensionserror', " %s\n", err)
1196
1197
1197 compengines = util.compengines._engines.values()
    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = util.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = util.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = util.findexe(editorbin)
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = util.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems

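# Illustrative sketch, not part of the original source: a healthy run of
# `hg debuginstall` prints one "checking ..." line per probe above and ends
# with "no problems detected"; with -T json, the fields emitted through
# fm.write()/fm.data() (compengines, re2, editor, username, problems, ...)
# come back as structured output. Exact values vary by installation.
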
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))

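# Illustrative usage (IDs hypothetical): `hg debugknown URL <40-hex-id>
# <40-hex-id>` prints a string such as "10", meaning the first node is known
# to the peer and the second is not.
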
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held

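# Illustrative output (values invented): with a stale store lock held by
# another process, `hg debuglocks` reports something like
#   lock:  user alice, process 4321, host example.com (123s)
#   wlock: free
# and returns the number of held locks; `hg debuglocks -L` would instead
# delete the store lock file outright.
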
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base:  %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)

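# Illustrative output (nodes shortened, names invented): during a conflicted
# merge, `hg debugmergestate` prints the records in the order sorted above,
# e.g.
#   * version 2 records
#   local: 2f1a...
#   other: 9c4b...
#   file: foo.txt (record type "F", state "u", hash 1e2d...)
#     local path: foo.txt (flags "")
# where state "u" marks an unresolved file and "r" a resolved one.
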
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')

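# Illustrative usage (prefix invented): `hg debugnamecomplete fea` lists every
# tag, bookmark, and open branch name starting with "fea", one per line,
# which is what shell completion scripts consume.
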
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of a transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()

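# Illustrative usage (hashes hypothetical): `hg debugobsolete <old-node>
# <new-node>` records a marker stating that <old-node> was superseded by
# <new-node>; `hg debugobsolete --index` lists the existing markers with
# their indices, and `hg debugobsolete --delete 0` removes the first one.
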
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')

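# Illustrative usage (paths invented): `hg debugpathcomplete sr` might print
# just the next segment, "src", while `hg debugpathcomplete --full sr` prints
# every tracked path under it, e.g. "src/main.py".
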
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        ('devel', 'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        local = peer.local() is not None
        canpush = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
        ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))

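# Illustrative output for a local path (values depend on the peer):
#   url: /path/to/repo
#   local: yes
#   pushable: yes
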
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below, in this order, to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command also shows warning messages while matching
    against ``merge-patterns`` and so on. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information above
    is useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))

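# Illustrative output (file names and tool invented): with ui.merge unset and
# a [merge-patterns] entry "**.c = kdiff3", `hg debugpickmergetool` might
# print
#   foo.c = kdiff3
#   README = :merge
# i.e. one "FILE = MERGETOOL" line per examined file, as documented above.
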
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        r = target.pushkey(namespace, key, old, new)
        ui.status(pycompat.bytestr(r) + '\n')
        return not r
    else:
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (util.escapestr(k),
                                   util.escapestr(v)))

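# Illustrative usage (bookmark name invented): `hg debugpushkey REPO
# bookmarks` lists each bookmark and its node, tab-separated; the
# five-argument form, e.g. `hg debugpushkey REPO bookmarks mybook <oldnode>
# <newnode>`, attempts the update and prints True or False for
# success/failure.
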
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))

@command('debugrebuilddirstate|debugrebuildstate',
         [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
          ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                                  'the working copy parent')),
         ],
         _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)

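# Illustrative usage: `hg debugrebuilddirstate --minimal` repairs only
# dirstate/manifest disagreements, while a plain run resets the dirstate so
# that the next `hg status` re-checks every tracked file's content against
# the chosen parent revision.
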
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)

@command('debugrename',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = m.rel(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)

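# Illustrative output (names invented): for a file created with `hg copy`,
# `hg debugrename new.txt` prints "new.txt renamed from old.txt:<hex-node>";
# files without copy metadata report "new.txt not renamed".
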
@command('debugrevlog', cmdutil.debugrevlogopts +
         [('d', 'dump', False, _('dump index data'))],
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    nummerges = 0
    numfull = 0
    numprev = 0
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []
    chainbases = []
    chainspans = []

    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            numfull += 1
            addsize(size, fullsize)
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            addsize(size, deltasize)
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

        # Obtain data on the raw chunks in the revlog.
        segment = r._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    if numrevs - numfull > 0:
        deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags  : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == 'empty':
            return '    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return '    0x%s      : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks        : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size   : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length  : ') + fmt % avgchainlen)
    ui.write(('max chain length  : ') + fmt % maxchainlen)
    ui.write(('max chain reach   : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                                numprev))
            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                                numprev))
            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                                numprev))
        if gdelta:
            ui.write(('deltas against p1    : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2    : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))

2134 @command('debugrevspec',
2135 @command('debugrevspec',
2135 [('', 'optimize', None,
2136 [('', 'optimize', None,
2136 _('print parsed tree after optimizing (DEPRECATED)')),
2137 _('print parsed tree after optimizing (DEPRECATED)')),
2137 ('', 'show-revs', True, _('print list of result revisions (default)')),
2138 ('', 'show-revs', True, _('print list of result revisions (default)')),
2138 ('s', 'show-set', None, _('print internal representation of result set')),
2139 ('s', 'show-set', None, _('print internal representation of result set')),
2139 ('p', 'show-stage', [],
2140 ('p', 'show-stage', [],
2140 _('print parsed tree at the given stage'), _('NAME')),
2141 _('print parsed tree at the given stage'), _('NAME')),
2141 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2142 ('', 'no-optimized', False, _('evaluate tree without optimization')),
2142 ('', 'verify-optimized', False, _('verify optimized result')),
2143 ('', 'verify-optimized', False, _('verify optimized result')),
2143 ],
2144 ],
2144 ('REVSPEC'))
2145 ('REVSPEC'))
2145 def debugrevspec(ui, repo, expr, **opts):
2146 def debugrevspec(ui, repo, expr, **opts):
2146 """parse and apply a revision specification
2147 """parse and apply a revision specification
2147
2148
2148 Use the -p/--show-stage option to print the parsed tree at the given stages.
2149 Use the -p/--show-stage option to print the parsed tree at the given stages.
2149 Use -p all to print the tree at every stage.
2150 Use -p all to print the tree at every stage.
2150
2151
2151 Use the --no-show-revs option with -s or -p to print only the set
2152 Use the --no-show-revs option with -s or -p to print only the set
2152 representation or the parsed tree, respectively.
2153 representation or the parsed tree, respectively.
2153
2154
2154 Use --verify-optimized to compare the optimized result with the unoptimized
2155 Use --verify-optimized to compare the optimized result with the unoptimized
2155 one. Returns 1 if the optimized result differs.
2156 one. Returns 1 if the optimized result differs.
2156 """
2157 """
2157 opts = pycompat.byteskwargs(opts)
2158 opts = pycompat.byteskwargs(opts)
2158 aliases = ui.configitems('revsetalias')
2159 aliases = ui.configitems('revsetalias')
2159 stages = [
2160 stages = [
2160 ('parsed', lambda tree: tree),
2161 ('parsed', lambda tree: tree),
2161 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2162 ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
2162 ui.warn)),
2163 ui.warn)),
2163 ('concatenated', revsetlang.foldconcat),
2164 ('concatenated', revsetlang.foldconcat),
2164 ('analyzed', revsetlang.analyze),
2165 ('analyzed', revsetlang.analyze),
2165 ('optimized', revsetlang.optimize),
2166 ('optimized', revsetlang.optimize),
2166 ]
2167 ]
2167 if opts['no_optimized']:
2168 if opts['no_optimized']:
2168 stages = stages[:-1]
2169 stages = stages[:-1]
2169 if opts['verify_optimized'] and opts['no_optimized']:
2170 if opts['verify_optimized'] and opts['no_optimized']:
2170 raise error.Abort(_('cannot use --verify-optimized with '
2171 raise error.Abort(_('cannot use --verify-optimized with '
2171 '--no-optimized'))
2172 '--no-optimized'))
2172 stagenames = set(n for n, f in stages)
2173 stagenames = set(n for n, f in stages)
2173
2174
2174 showalways = set()
2175 showalways = set()
2175 showchanged = set()
2176 showchanged = set()
2176 if ui.verbose and not opts['show_stage']:
2177 if ui.verbose and not opts['show_stage']:
2177 # show parsed tree by --verbose (deprecated)
2178 # show parsed tree by --verbose (deprecated)
2178 showalways.add('parsed')
2179 showalways.add('parsed')
2179 showchanged.update(['expanded', 'concatenated'])
2180 showchanged.update(['expanded', 'concatenated'])
2180 if opts['optimize']:
2181 if opts['optimize']:
2181 showalways.add('optimized')
2182 showalways.add('optimized')
2182 if opts['show_stage'] and opts['optimize']:
2183 if opts['show_stage'] and opts['optimize']:
2183 raise error.Abort(_('cannot use --optimize with --show-stage'))
2184 raise error.Abort(_('cannot use --optimize with --show-stage'))
2184 if opts['show_stage'] == ['all']:
2185 if opts['show_stage'] == ['all']:
2185 showalways.update(stagenames)
2186 showalways.update(stagenames)
2186 else:
2187 else:
2187 for n in opts['show_stage']:
2188 for n in opts['show_stage']:
2188 if n not in stagenames:
2189 if n not in stagenames:
2189 raise error.Abort(_('invalid stage name: %s') % n)
2190 raise error.Abort(_('invalid stage name: %s') % n)
2190 showalways.update(opts['show_stage'])
2191 showalways.update(opts['show_stage'])
2191
2192
2192 treebystage = {}
2193 treebystage = {}
2193 printedtree = None
2194 printedtree = None
2194 tree = revsetlang.parse(expr, lookup=repo.__contains__)
2195 tree = revsetlang.parse(expr, lookup=repo.__contains__)
2195 for n, f in stages:
2196 for n, f in stages:
2196 treebystage[n] = tree = f(tree)
2197 treebystage[n] = tree = f(tree)
2197 if n in showalways or (n in showchanged and tree != printedtree):
2198 if n in showalways or (n in showchanged and tree != printedtree):
2198 if opts['show_stage'] or n != 'parsed':
2199 if opts['show_stage'] or n != 'parsed':
2199 ui.write(("* %s:\n") % n)
2200 ui.write(("* %s:\n") % n)
2200 ui.write(revsetlang.prettyformat(tree), "\n")
2201 ui.write(revsetlang.prettyformat(tree), "\n")
2201 printedtree = tree
2202 printedtree = tree
2202
2203
2203 if opts['verify_optimized']:
2204 if opts['verify_optimized']:
2204 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2205 arevs = revset.makematcher(treebystage['analyzed'])(repo)
2205 brevs = revset.makematcher(treebystage['optimized'])(repo)
2206 brevs = revset.makematcher(treebystage['optimized'])(repo)
2206 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2207 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2207 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2208 ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
2208 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2209 ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
2209 arevs = list(arevs)
2210 arevs = list(arevs)
2210 brevs = list(brevs)
2211 brevs = list(brevs)
2211 if arevs == brevs:
2212 if arevs == brevs:
2212 return 0
2213 return 0
2213 ui.write(('--- analyzed\n'), label='diff.file_a')
2214 ui.write(('--- analyzed\n'), label='diff.file_a')
2214 ui.write(('+++ optimized\n'), label='diff.file_b')
2215 ui.write(('+++ optimized\n'), label='diff.file_b')
2215 sm = difflib.SequenceMatcher(None, arevs, brevs)
2216 sm = difflib.SequenceMatcher(None, arevs, brevs)
2216 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2217 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2217 if tag in ('delete', 'replace'):
2218 if tag in ('delete', 'replace'):
2218 for c in arevs[alo:ahi]:
2219 for c in arevs[alo:ahi]:
2219 ui.write('-%s\n' % c, label='diff.deleted')
2220 ui.write('-%s\n' % c, label='diff.deleted')
2220 if tag in ('insert', 'replace'):
2221 if tag in ('insert', 'replace'):
2221 for c in brevs[blo:bhi]:
2222 for c in brevs[blo:bhi]:
2222 ui.write('+%s\n' % c, label='diff.inserted')
2223 ui.write('+%s\n' % c, label='diff.inserted')
2223 if tag == 'equal':
2224 if tag == 'equal':
2224 for c in arevs[alo:ahi]:
2225 for c in arevs[alo:ahi]:
2225 ui.write(' %s\n' % c)
2226 ui.write(' %s\n' % c)
2226 return 1
2227 return 1
2227
2228
2228 func = revset.makematcher(tree)
2229 func = revset.makematcher(tree)
2229 revs = func(repo)
2230 revs = func(repo)
2230 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2231 if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
2231 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2232 ui.write(("* set:\n"), smartset.prettyformat(revs), "\n")
2232 if not opts['show_revs']:
2233 if not opts['show_revs']:
2233 return
2234 return
2234 for c in revs:
2235 for c in revs:
2235 ui.write("%d\n" % c)
2236 ui.write("%d\n" % c)
2236
2237
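The staging machinery above is a small pipeline: a tree is threaded through ordered (name, transform) pairs and printed whenever a requested stage produces something new. A minimal standalone sketch of that pattern, with placeholder string transforms standing in for the real revsetlang passes::

    # Sketch of debugrevspec's stage pipeline; the transforms here are
    # illustrative stand-ins, not the real revsetlang functions.
    def runstages(tree, stages, show):
        printed = None
        for name, transform in stages:
            tree = transform(tree)
            if name in show and tree != printed:
                print('* %s:' % name)
                print(tree)
                printed = tree
        return tree

    stages = [
        ('parsed', lambda t: t),
        ('lowered', str.lower),    # stand-in for alias expansion
        ('stripped', str.strip),   # stand-in for optimization
    ]
    runstages('  ALL() ', stages, show={'parsed', 'stripped'})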
2237 @command('debugserve', [
2238 @command('debugserve', [
2238 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2239 ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
2239 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2240 ('', 'logiofd', '', _('file descriptor to log server I/O to')),
2240 ('', 'logiofile', '', _('file to log server I/O to')),
2241 ('', 'logiofile', '', _('file to log server I/O to')),
2241 ], '')
2242 ], '')
2242 def debugserve(ui, repo, **opts):
2243 def debugserve(ui, repo, **opts):
2243 """run a server with advanced settings
2244 """run a server with advanced settings
2244
2245
2245 This command is similar to :hg:`serve`. It exists partially as a
2246 This command is similar to :hg:`serve`. It exists partially as a
2246 workaround for the fact that ``hg serve --stdio`` must have specific
2247 workaround for the fact that ``hg serve --stdio`` must have specific
2247 arguments for security reasons.
2248 arguments for security reasons.
2248 """
2249 """
2249 opts = pycompat.byteskwargs(opts)
2250 opts = pycompat.byteskwargs(opts)
2250
2251
2251 if not opts['sshstdio']:
2252 if not opts['sshstdio']:
2252 raise error.Abort(_('only --sshstdio is currently supported'))
2253 raise error.Abort(_('only --sshstdio is currently supported'))
2253
2254
2254 logfh = None
2255 logfh = None
2255
2256
2256 if opts['logiofd'] and opts['logiofile']:
2257 if opts['logiofd'] and opts['logiofile']:
2257 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2258 raise error.Abort(_('cannot use both --logiofd and --logiofile'))
2258
2259
2259 if opts['logiofd']:
2260 if opts['logiofd']:
2260 # Line buffered because output is line based.
2261 # Line buffered because output is line based.
2261 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2262 logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
2262 elif opts['logiofile']:
2263 elif opts['logiofile']:
2263 logfh = open(opts['logiofile'], 'ab', 1)
2264 logfh = open(opts['logiofile'], 'ab', 1)
2264
2265
2265 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2266 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
2266 s.serve_forever()
2267 s.serve_forever()
2267
2268
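The log handle above is opened with a buffer size of 1 because the I/O log is line oriented: under the Python 2 semantics this code targets, that requests line buffering even for a binary file, so each completed line reaches the log immediately (Python 3 only honours line buffering in text mode). A small illustration, with an arbitrary file name::

    import os

    # bufsize=1 requests line buffering: every write ending in '\n' is
    # flushed right away, keeping an external tail of the log current.
    # 'server-io.log' is just an illustrative name.
    fd = os.open('server-io.log', os.O_WRONLY | os.O_CREAT | os.O_APPEND)
    logfh = os.fdopen(fd, 'ab', 1)
    logfh.write(b'o> capabilities\n')
    logfh.close()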
2268 @command('debugsetparents', [], _('REV1 [REV2]'))
2269 @command('debugsetparents', [], _('REV1 [REV2]'))
2269 def debugsetparents(ui, repo, rev1, rev2=None):
2270 def debugsetparents(ui, repo, rev1, rev2=None):
2270 """manually set the parents of the current working directory
2271 """manually set the parents of the current working directory
2271
2272
2272 This is useful for writing repository conversion tools, but should
2273 This is useful for writing repository conversion tools, but should
2273 be used with care. For example, neither the working directory nor the
2274 be used with care. For example, neither the working directory nor the
2274 dirstate is updated, so file status may be incorrect after running this
2275 dirstate is updated, so file status may be incorrect after running this
2275 command.
2276 command.
2276
2277
2277 Returns 0 on success.
2278 Returns 0 on success.
2278 """
2279 """
2279
2280
2280 r1 = scmutil.revsingle(repo, rev1).node()
2281 r1 = scmutil.revsingle(repo, rev1).node()
2281 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2282 r2 = scmutil.revsingle(repo, rev2, 'null').node()
2282
2283
2283 with repo.wlock():
2284 with repo.wlock():
2284 repo.setparents(r1, r2)
2285 repo.setparents(r1, r2)
2285
2286
2286 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2287 @command('debugssl', [], '[SOURCE]', optionalrepo=True)
2287 def debugssl(ui, repo, source=None, **opts):
2288 def debugssl(ui, repo, source=None, **opts):
2288 '''test a secure connection to a server
2289 '''test a secure connection to a server
2289
2290
2290 This builds the certificate chain for the server on Windows, installing the
2291 This builds the certificate chain for the server on Windows, installing the
2291 missing intermediates and trusted root via Windows Update if necessary. It
2292 missing intermediates and trusted root via Windows Update if necessary. It
2292 does nothing on other platforms.
2293 does nothing on other platforms.
2293
2294
2294 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2295 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
2295 that server is used. See :hg:`help urls` for more information.
2296 that server is used. See :hg:`help urls` for more information.
2296
2297
2297 If the update succeeds, retry the original operation. Otherwise, the cause
2298 If the update succeeds, retry the original operation. Otherwise, the cause
2298 of the SSL error is likely another issue.
2299 of the SSL error is likely another issue.
2299 '''
2300 '''
2300 if not pycompat.iswindows:
2301 if not pycompat.iswindows:
2301 raise error.Abort(_('certificate chain building is only possible on '
2302 raise error.Abort(_('certificate chain building is only possible on '
2302 'Windows'))
2303 'Windows'))
2303
2304
2304 if not source:
2305 if not source:
2305 if not repo:
2306 if not repo:
2306 raise error.Abort(_("there is no Mercurial repository here, and no "
2307 raise error.Abort(_("there is no Mercurial repository here, and no "
2307 "server specified"))
2308 "server specified"))
2308 source = "default"
2309 source = "default"
2309
2310
2310 source, branches = hg.parseurl(ui.expandpath(source))
2311 source, branches = hg.parseurl(ui.expandpath(source))
2311 url = util.url(source)
2312 url = util.url(source)
2312 addr = None
2313 addr = None
2313
2314
2314 defaultport = {'https': 443, 'ssh': 22}
2315 defaultport = {'https': 443, 'ssh': 22}
2315 if url.scheme in defaultport:
2316 if url.scheme in defaultport:
2316 try:
2317 try:
2317 addr = (url.host, int(url.port or defaultport[url.scheme]))
2318 addr = (url.host, int(url.port or defaultport[url.scheme]))
2318 except ValueError:
2319 except ValueError:
2319 raise error.Abort(_("malformed port number in URL"))
2320 raise error.Abort(_("malformed port number in URL"))
2320 else:
2321 else:
2321 raise error.Abort(_("only https and ssh connections are supported"))
2322 raise error.Abort(_("only https and ssh connections are supported"))
2322
2323
2323 from . import win32
2324 from . import win32
2324
2325
2325 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2326 s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
2326 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2327 cert_reqs=ssl.CERT_NONE, ca_certs=None)
2327
2328
2328 try:
2329 try:
2329 s.connect(addr)
2330 s.connect(addr)
2330 cert = s.getpeercert(True)
2331 cert = s.getpeercert(True)
2331
2332
2332 ui.status(_('checking the certificate chain for %s\n') % url.host)
2333 ui.status(_('checking the certificate chain for %s\n') % url.host)
2333
2334
2334 complete = win32.checkcertificatechain(cert, build=False)
2335 complete = win32.checkcertificatechain(cert, build=False)
2335
2336
2336 if not complete:
2337 if not complete:
2337 ui.status(_('certificate chain is incomplete, updating... '))
2338 ui.status(_('certificate chain is incomplete, updating... '))
2338
2339
2339 if not win32.checkcertificatechain(cert):
2340 if not win32.checkcertificatechain(cert):
2340 ui.status(_('failed.\n'))
2341 ui.status(_('failed.\n'))
2341 else:
2342 else:
2342 ui.status(_('done.\n'))
2343 ui.status(_('done.\n'))
2343 else:
2344 else:
2344 ui.status(_('full certificate chain is available\n'))
2345 ui.status(_('full certificate chain is available\n'))
2345 finally:
2346 finally:
2346 s.close()
2347 s.close()
2347
2348
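The address computation above defaults the port by scheme when the URL does not carry one. Here is the same defaulting logic in isolation, using the standard library in place of Mercurial's util.url (the helper name and sample URLs are illustrative)::

    from urllib.parse import urlsplit

    # Mirror debugssl's defaultport table: use the URL's port when given,
    # otherwise fall back to the scheme's well-known port.
    DEFAULT_PORT = {'https': 443, 'ssh': 22}

    def addrfor(url):
        parts = urlsplit(url)
        if parts.scheme not in DEFAULT_PORT:
            raise ValueError('only https and ssh connections are supported')
        return (parts.hostname, parts.port or DEFAULT_PORT[parts.scheme])

    print(addrfor('https://example.com/repo'))      # ('example.com', 443)
    print(addrfor('ssh://example.com:2222/repo'))   # ('example.com', 2222)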
2348 @command('debugsub',
2349 @command('debugsub',
2349 [('r', 'rev', '',
2350 [('r', 'rev', '',
2350 _('revision to check'), _('REV'))],
2351 _('revision to check'), _('REV'))],
2351 _('[-r REV] [REV]'))
2352 _('[-r REV] [REV]'))
2352 def debugsub(ui, repo, rev=None):
2353 def debugsub(ui, repo, rev=None):
2353 ctx = scmutil.revsingle(repo, rev, None)
2354 ctx = scmutil.revsingle(repo, rev, None)
2354 for k, v in sorted(ctx.substate.items()):
2355 for k, v in sorted(ctx.substate.items()):
2355 ui.write(('path %s\n') % k)
2356 ui.write(('path %s\n') % k)
2356 ui.write((' source %s\n') % v[0])
2357 ui.write((' source %s\n') % v[0])
2357 ui.write((' revision %s\n') % v[1])
2358 ui.write((' revision %s\n') % v[1])
2358
2359
2359 @command('debugsuccessorssets',
2360 @command('debugsuccessorssets',
2360 [('', 'closest', False, _('return closest successors sets only'))],
2361 [('', 'closest', False, _('return closest successors sets only'))],
2361 _('[REV]'))
2362 _('[REV]'))
2362 def debugsuccessorssets(ui, repo, *revs, **opts):
2363 def debugsuccessorssets(ui, repo, *revs, **opts):
2363 """show set of successors for revision
2364 """show set of successors for revision
2364
2365
2365 A successors set of changeset A is a consistent group of revisions that
2366 A successors set of changeset A is a consistent group of revisions that
2366 succeed A. It contains non-obsolete changesets only unless the closest
2367 succeed A. It contains non-obsolete changesets only unless the closest
2367 successors sets option (--closest) is set.
2368 successors sets option (--closest) is set.
2368
2369
2369 In most cases a changeset A has a single successors set containing a single
2370 In most cases a changeset A has a single successors set containing a single
2370 successor (changeset A replaced by A').
2371 successor (changeset A replaced by A').
2371
2372
2372 A changeset that is made obsolete with no successors is called "pruned".
2373 A changeset that is made obsolete with no successors is called "pruned".
2373 Such changesets have no successors sets at all.
2374 Such changesets have no successors sets at all.
2374
2375
2375 A changeset that has been "split" will have a successors set containing
2376 A changeset that has been "split" will have a successors set containing
2376 more than one successor.
2377 more than one successor.
2377
2378
2378 A changeset that has been rewritten in multiple different ways is called
2379 A changeset that has been rewritten in multiple different ways is called
2379 "divergent". Such changesets have multiple successor sets (each of which
2380 "divergent". Such changesets have multiple successor sets (each of which
2380 may also be split, i.e. have multiple successors).
2381 may also be split, i.e. have multiple successors).
2381
2382
2382 Results are displayed as follows::
2383 Results are displayed as follows::
2383
2384
2384 <rev1>
2385 <rev1>
2385 <successors-1A>
2386 <successors-1A>
2386 <rev2>
2387 <rev2>
2387 <successors-2A>
2388 <successors-2A>
2388 <successors-2B1> <successors-2B2> <successors-2B3>
2389 <successors-2B1> <successors-2B2> <successors-2B3>
2389
2390
2390 Here rev2 has two possible (i.e. divergent) successors sets. The first
2391 Here rev2 has two possible (i.e. divergent) successors sets. The first
2391 holds one element, whereas the second holds three (i.e. the changeset has
2392 holds one element, whereas the second holds three (i.e. the changeset has
2392 been split).
2393 been split).
2393 """
2394 """
2394 # passed to successorssets caching computation from one call to another
2395 # passed to successorssets caching computation from one call to another
2395 cache = {}
2396 cache = {}
2396 ctx2str = bytes
2397 ctx2str = bytes
2397 node2str = short
2398 node2str = short
2398 for rev in scmutil.revrange(repo, revs):
2399 for rev in scmutil.revrange(repo, revs):
2399 ctx = repo[rev]
2400 ctx = repo[rev]
2400 ui.write('%s\n' % ctx2str(ctx))
2401 ui.write('%s\n' % ctx2str(ctx))
2401 for succsset in obsutil.successorssets(repo, ctx.node(),
2402 for succsset in obsutil.successorssets(repo, ctx.node(),
2402 closest=opts[r'closest'],
2403 closest=opts[r'closest'],
2403 cache=cache):
2404 cache=cache):
2404 if succsset:
2405 if succsset:
2405 ui.write(' ')
2406 ui.write(' ')
2406 ui.write(node2str(succsset[0]))
2407 ui.write(node2str(succsset[0]))
2407 for node in succsset[1:]:
2408 for node in succsset[1:]:
2408 ui.write(' ')
2409 ui.write(' ')
2409 ui.write(node2str(node))
2410 ui.write(node2str(node))
2410 ui.write('\n')
2411 ui.write('\n')
2411
2412
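The write loop above produces exactly the layout promised by the docstring: the revision on its own line, then one indented line per successors set with the nodes separated by single spaces. A toy renderer over invented data makes the shape concrete::

    # Toy renderer for the documented output format; the revisions and
    # node hashes below are made up for illustration.
    def render(succsets):
        out = []
        for rev, sets in succsets:
            out.append(rev)
            for s in sets:
                out.append(' ' + ' '.join(s))
        return '\n'.join(out)

    print(render([
        ('1:a1b2c3d4', [['e5f6a7b8']]),                  # plain rewrite
        ('2:b2c3d4e5', [['f6a7b8c9'],
                        ['c9d0e1f2', 'd0e1f2a3', 'e1f2a3b4']]),  # divergent
    ]))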
2412 @command('debugtemplate',
2413 @command('debugtemplate',
2413 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2414 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2414 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2415 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2415 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2416 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2416 optionalrepo=True)
2417 optionalrepo=True)
2417 def debugtemplate(ui, repo, tmpl, **opts):
2418 def debugtemplate(ui, repo, tmpl, **opts):
2418 """parse and apply a template
2419 """parse and apply a template
2419
2420
2420 If -r/--rev is given, the template is processed as a log template and
2421 If -r/--rev is given, the template is processed as a log template and
2421 applied to the given changesets. Otherwise, it is processed as a generic
2422 applied to the given changesets. Otherwise, it is processed as a generic
2422 template.
2423 template.
2423
2424
2424 Use --verbose to print the parsed tree.
2425 Use --verbose to print the parsed tree.
2425 """
2426 """
2426 revs = None
2427 revs = None
2427 if opts[r'rev']:
2428 if opts[r'rev']:
2428 if repo is None:
2429 if repo is None:
2429 raise error.RepoError(_('there is no Mercurial repository here '
2430 raise error.RepoError(_('there is no Mercurial repository here '
2430 '(.hg not found)'))
2431 '(.hg not found)'))
2431 revs = scmutil.revrange(repo, opts[r'rev'])
2432 revs = scmutil.revrange(repo, opts[r'rev'])
2432
2433
2433 props = {}
2434 props = {}
2434 for d in opts[r'define']:
2435 for d in opts[r'define']:
2435 try:
2436 try:
2436 k, v = (e.strip() for e in d.split('=', 1))
2437 k, v = (e.strip() for e in d.split('=', 1))
2437 if not k or k == 'ui':
2438 if not k or k == 'ui':
2438 raise ValueError
2439 raise ValueError
2439 props[k] = v
2440 props[k] = v
2440 except ValueError:
2441 except ValueError:
2441 raise error.Abort(_('malformed keyword definition: %s') % d)
2442 raise error.Abort(_('malformed keyword definition: %s') % d)
2442
2443
2443 if ui.verbose:
2444 if ui.verbose:
2444 aliases = ui.configitems('templatealias')
2445 aliases = ui.configitems('templatealias')
2445 tree = templater.parse(tmpl)
2446 tree = templater.parse(tmpl)
2446 ui.note(templater.prettyformat(tree), '\n')
2447 ui.note(templater.prettyformat(tree), '\n')
2447 newtree = templater.expandaliases(tree, aliases)
2448 newtree = templater.expandaliases(tree, aliases)
2448 if newtree != tree:
2449 if newtree != tree:
2449 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2450 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2450
2451
2451 if revs is None:
2452 if revs is None:
2452 tres = formatter.templateresources(ui, repo)
2453 tres = formatter.templateresources(ui, repo)
2453 t = formatter.maketemplater(ui, tmpl, resources=tres)
2454 t = formatter.maketemplater(ui, tmpl, resources=tres)
2454 ui.write(t.render(props))
2455 ui.write(t.render(props))
2455 else:
2456 else:
2456 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2457 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
2457 for r in revs:
2458 for r in revs:
2458 displayer.show(repo[r], **pycompat.strkwargs(props))
2459 displayer.show(repo[r], **pycompat.strkwargs(props))
2459 displayer.close()
2460 displayer.close()
2460
2461
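The --define loop above leans on tuple unpacking to reject malformed definitions: a string without '=' makes the two-way unpack raise ValueError, which lands in the same error path as an empty or reserved key. Extracted as a standalone helper::

    # KEY=VALUE parsing as in debugtemplate's --define handling; an
    # absent '=', an empty key, or the reserved key 'ui' all abort.
    def parsedefines(defines):
        props = {}
        for d in defines:
            try:
                k, v = (e.strip() for e in d.split('=', 1))
                if not k or k == 'ui':
                    raise ValueError
                props[k] = v
            except ValueError:
                raise SystemExit('malformed keyword definition: %s' % d)
        return props

    print(parsedefines(['greeting=hello', 'who = world']))
    # {'greeting': 'hello', 'who': 'world'}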
2461 @command('debugupdatecaches', [])
2462 @command('debugupdatecaches', [])
2462 def debugupdatecaches(ui, repo, *pats, **opts):
2463 def debugupdatecaches(ui, repo, *pats, **opts):
2463 """warm all known caches in the repository"""
2464 """warm all known caches in the repository"""
2464 with repo.wlock(), repo.lock():
2465 with repo.wlock(), repo.lock():
2465 repo.updatecaches()
2466 repo.updatecaches()
2466
2467
2467 @command('debugupgraderepo', [
2468 @command('debugupgraderepo', [
2468 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2469 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2469 ('', 'run', False, _('performs an upgrade')),
2470 ('', 'run', False, _('performs an upgrade')),
2470 ])
2471 ])
2471 def debugupgraderepo(ui, repo, run=False, optimize=None):
2472 def debugupgraderepo(ui, repo, run=False, optimize=None):
2472 """upgrade a repository to use different features
2473 """upgrade a repository to use different features
2473
2474
2474 If no arguments are specified, the repository is evaluated for upgrade
2475 If no arguments are specified, the repository is evaluated for upgrade
2475 and a list of problems and potential optimizations is printed.
2476 and a list of problems and potential optimizations is printed.
2476
2477
2477 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2478 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2478 can be influenced via additional arguments. More details will be provided
2479 can be influenced via additional arguments. More details will be provided
2479 by the command output when run without ``--run``.
2480 by the command output when run without ``--run``.
2480
2481
2481 During the upgrade, the repository will be locked and no writes will be
2482 During the upgrade, the repository will be locked and no writes will be
2482 allowed.
2483 allowed.
2483
2484
2484 At the end of the upgrade, the repository may not be readable while new
2485 At the end of the upgrade, the repository may not be readable while new
2485 repository data is swapped in. This window will be as long as it takes to
2486 repository data is swapped in. This window will be as long as it takes to
2486 rename some directories inside the ``.hg`` directory. On most machines, this
2487 rename some directories inside the ``.hg`` directory. On most machines, this
2487 should complete almost instantaneously and the chances of a consumer being
2488 should complete almost instantaneously and the chances of a consumer being
2488 unable to access the repository should be low.
2489 unable to access the repository should be low.
2489 """
2490 """
2490 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2491 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2491
2492
2492 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2493 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2493 inferrepo=True)
2494 inferrepo=True)
2494 def debugwalk(ui, repo, *pats, **opts):
2495 def debugwalk(ui, repo, *pats, **opts):
2495 """show how files match on given patterns"""
2496 """show how files match on given patterns"""
2496 opts = pycompat.byteskwargs(opts)
2497 opts = pycompat.byteskwargs(opts)
2497 m = scmutil.match(repo[None], pats, opts)
2498 m = scmutil.match(repo[None], pats, opts)
2498 ui.write(('matcher: %r\n' % m))
2499 ui.write(('matcher: %r\n' % m))
2499 items = list(repo[None].walk(m))
2500 items = list(repo[None].walk(m))
2500 if not items:
2501 if not items:
2501 return
2502 return
2502 f = lambda fn: fn
2503 f = lambda fn: fn
2503 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2504 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2504 f = lambda fn: util.normpath(fn)
2505 f = lambda fn: util.normpath(fn)
2505 fmt = 'f %%-%ds %%-%ds %%s' % (
2506 fmt = 'f %%-%ds %%-%ds %%s' % (
2506 max([len(abs) for abs in items]),
2507 max([len(abs) for abs in items]),
2507 max([len(m.rel(abs)) for abs in items]))
2508 max([len(m.rel(abs)) for abs in items]))
2508 for abs in items:
2509 for abs in items:
2509 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2510 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2510 ui.write("%s\n" % line.rstrip())
2511 ui.write("%s\n" % line.rstrip())
2511
2512
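The format string in debugwalk is built in two passes: first measure the widest entry in each column, then bake those widths into left-aligned '%-Ns' fields. The same trick on plain data (the rows below are invented)::

    # Two-pass column layout: compute the widths, then build the format.
    rows = [('src/main.py', 'main.py', 'exact'),
            ('docs/readme.txt', 'readme.txt', '')]

    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(r[0]) for r in rows),
        max(len(r[1]) for r in rows))
    for row in rows:
        print((fmt % row).rstrip())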
2512 @command('debugwireargs',
2513 @command('debugwireargs',
2513 [('', 'three', '', 'three'),
2514 [('', 'three', '', 'three'),
2514 ('', 'four', '', 'four'),
2515 ('', 'four', '', 'four'),
2515 ('', 'five', '', 'five'),
2516 ('', 'five', '', 'five'),
2516 ] + cmdutil.remoteopts,
2517 ] + cmdutil.remoteopts,
2517 _('REPO [OPTIONS]... [ONE [TWO]]'),
2518 _('REPO [OPTIONS]... [ONE [TWO]]'),
2518 norepo=True)
2519 norepo=True)
2519 def debugwireargs(ui, repopath, *vals, **opts):
2520 def debugwireargs(ui, repopath, *vals, **opts):
2520 opts = pycompat.byteskwargs(opts)
2521 opts = pycompat.byteskwargs(opts)
2521 repo = hg.peer(ui, opts, repopath)
2522 repo = hg.peer(ui, opts, repopath)
2522 for opt in cmdutil.remoteopts:
2523 for opt in cmdutil.remoteopts:
2523 del opts[opt[1]]
2524 del opts[opt[1]]
2524 args = {}
2525 args = {}
2525 for k, v in opts.iteritems():
2526 for k, v in opts.iteritems():
2526 if v:
2527 if v:
2527 args[k] = v
2528 args[k] = v
2528 args = pycompat.strkwargs(args)
2529 args = pycompat.strkwargs(args)
2529 # run twice to check that we don't mess up the stream for the next command
2530 # run twice to check that we don't mess up the stream for the next command
2530 res1 = repo.debugwireargs(*vals, **args)
2531 res1 = repo.debugwireargs(*vals, **args)
2531 res2 = repo.debugwireargs(*vals, **args)
2532 res2 = repo.debugwireargs(*vals, **args)
2532 ui.write("%s\n" % res1)
2533 ui.write("%s\n" % res1)
2533 if res1 != res2:
2534 if res1 != res2:
2534 ui.warn("%s\n" % res2)
2535 ui.warn("%s\n" % res2)
2535
2536
2536 def _parsewirelangblocks(fh):
2537 def _parsewirelangblocks(fh):
2537 activeaction = None
2538 activeaction = None
2538 blocklines = []
2539 blocklines = []
2539
2540
2540 for line in fh:
2541 for line in fh:
2541 line = line.rstrip()
2542 line = line.rstrip()
2542 if not line:
2543 if not line:
2543 continue
2544 continue
2544
2545
2545 if line.startswith(b'#'):
2546 if line.startswith(b'#'):
2546 continue
2547 continue
2547
2548
2548 if not line.startswith(' '):
2549 if not line.startswith(' '):
2549 # New block. Flush previous one.
2550 # New block. Flush previous one.
2550 if activeaction:
2551 if activeaction:
2551 yield activeaction, blocklines
2552 yield activeaction, blocklines
2552
2553
2553 activeaction = line
2554 activeaction = line
2554 blocklines = []
2555 blocklines = []
2555 continue
2556 continue
2556
2557
2557 # Else we start with an indent.
2558 # Else we start with an indent.
2558
2559
2559 if not activeaction:
2560 if not activeaction:
2560 raise error.Abort(_('indented line outside of block'))
2561 raise error.Abort(_('indented line outside of block'))
2561
2562
2562 blocklines.append(line)
2563 blocklines.append(line)
2563
2564
2564 # Flush last block.
2565 # Flush last block.
2565 if activeaction:
2566 if activeaction:
2566 yield activeaction, blocklines
2567 yield activeaction, blocklines
2567
2568
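The generator above implements the block grammar used by debugwireproto: an unindented line opens a new action, indented lines attach to the current one, and '#' lines are skipped. A standalone str-based equivalent (the original is written for bytes I/O) shows the grouping::

    import io

    def parseblocks(fh):
        action, lines = None, []
        for line in fh:
            line = line.rstrip()
            if not line or line.startswith('#'):
                continue
            if not line.startswith(' '):
                if action:                 # flush the previous block
                    yield action, lines
                action, lines = line, []
                continue
            if not action:
                raise ValueError('indented line outside of block')
            lines.append(line)
        if action:                         # flush the last block
            yield action, lines

    script = io.StringIO('command listkeys\n'
                         '    namespace bookmarks\n'
                         '# a comment\n'
                         'close\n')
    print(list(parseblocks(script)))
    # [('command listkeys', ['    namespace bookmarks']), ('close', [])]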
2568 @command('debugwireproto',
2569 @command('debugwireproto',
2569 [
2570 [
2570 ('', 'localssh', False, _('start an SSH server for this repo')),
2571 ('', 'localssh', False, _('start an SSH server for this repo')),
2571 ('', 'peer', '', _('construct a specific version of the peer')),
2572 ('', 'peer', '', _('construct a specific version of the peer')),
2572 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2573 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2573 ] + cmdutil.remoteopts,
2574 ] + cmdutil.remoteopts,
2574 _('[REPO]'),
2575 _('[REPO]'),
2575 optionalrepo=True)
2576 optionalrepo=True)
2576 def debugwireproto(ui, repo, **opts):
2577 def debugwireproto(ui, repo, **opts):
2577 """send wire protocol commands to a server
2578 """send wire protocol commands to a server
2578
2579
2579 This command can be used to issue wire protocol commands to remote
2580 This command can be used to issue wire protocol commands to remote
2580 peers and to debug the raw data being exchanged.
2581 peers and to debug the raw data being exchanged.
2581
2582
2582 ``--localssh`` will start an SSH server against the current repository
2583 ``--localssh`` will start an SSH server against the current repository
2583 and connect to that. By default, the connection will perform a handshake
2584 and connect to that. By default, the connection will perform a handshake
2584 and establish an appropriate peer instance.
2585 and establish an appropriate peer instance.
2585
2586
2586 ``--peer`` can be used to bypass the handshake protocol and construct a
2587 ``--peer`` can be used to bypass the handshake protocol and construct a
2587 peer instance using the specified class type. Valid values are ``raw``,
2588 peer instance using the specified class type. Valid values are ``raw``,
2588 ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending raw data
2589 ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending raw data
2589 payloads and don't support higher-level command actions.
2590 payloads and don't support higher-level command actions.
2590
2591
2591 ``--noreadstderr`` can be used to disable automatic reading from stderr
2592 ``--noreadstderr`` can be used to disable automatic reading from stderr
2592 of the peer (for SSH connections only). Disabling automatic reading of
2593 of the peer (for SSH connections only). Disabling automatic reading of
2593 stderr is useful for making output more deterministic.
2594 stderr is useful for making output more deterministic.
2594
2595
2595 Commands are issued via a mini language which is specified via stdin.
2596 Commands are issued via a mini language which is specified via stdin.
2596 The language consists of individual actions to perform. An action is
2597 The language consists of individual actions to perform. An action is
2597 defined by a block. A block is defined as a line with no leading
2598 defined by a block. A block is defined as a line with no leading
2598 space followed by 0 or more lines with leading space. Blocks are
2599 space followed by 0 or more lines with leading space. Blocks are
2599 effectively a high-level command with additional metadata.
2600 effectively a high-level command with additional metadata.
2600
2601
2601 Lines beginning with ``#`` are ignored.
2602 Lines beginning with ``#`` are ignored.
2602
2603
2603 The following sections denote available actions.
2604 The following sections denote available actions.
2604
2605
2605 raw
2606 raw
2606 ---
2607 ---
2607
2608
2608 Send raw data to the server.
2609 Send raw data to the server.
2609
2610
2610 The block payload contains the raw data to send as one atomic send
2611 The block payload contains the raw data to send as one atomic send
2611 operation. The data may not actually be delivered in a single system
2612 operation. The data may not actually be delivered in a single system
2612 call: it depends on the abilities of the transport being used.
2613 call: it depends on the abilities of the transport being used.
2613
2614
2614 Each line in the block is de-indented and concatenated. Then, that
2615 Each line in the block is de-indented and concatenated. Then, that
2615 value is evaluated as a Python b'' literal. This allows the use of
2616 value is evaluated as a Python b'' literal. This allows the use of
2616 backslash escaping, etc.
2617 backslash escaping, etc.
2617
2618
2618 raw+
2619 raw+
2619 ----
2620 ----
2620
2621
2621 Behaves like ``raw`` except flushes output afterwards.
2622 Behaves like ``raw`` except flushes output afterwards.
2622
2623
2623 command <X>
2624 command <X>
2624 -----------
2625 -----------
2625
2626
2626 Send a request to run a named command, whose name follows the ``command``
2627 Send a request to run a named command, whose name follows the ``command``
2627 string.
2628 string.
2628
2629
2629 Arguments to the command are defined as lines in this block. The format of
2630 Arguments to the command are defined as lines in this block. The format of
2630 each line is ``<key> <value>``. e.g.::
2631 each line is ``<key> <value>``. e.g.::
2631
2632
2632 command listkeys
2633 command listkeys
2633 namespace bookmarks
2634 namespace bookmarks
2634
2635
2635 Values are interpreted as Python b'' literals. This allows encoding
2636 Values are interpreted as Python b'' literals. This allows encoding
2636 special byte sequences via backslash escaping.
2637 special byte sequences via backslash escaping.
2637
2638
2638 The following arguments have special meaning:
2639 The following arguments have special meaning:
2639
2640
2640 ``PUSHFILE``
2641 ``PUSHFILE``
2641 When defined, the *push* mechanism of the peer will be used instead
2642 When defined, the *push* mechanism of the peer will be used instead
2642 of the static request-response mechanism and the content of the
2643 of the static request-response mechanism and the content of the
2643 file specified in the value of this argument will be sent as the
2644 file specified in the value of this argument will be sent as the
2644 command payload.
2645 command payload.
2645
2646
2646 This can be used to submit a local bundle file to the remote.
2647 This can be used to submit a local bundle file to the remote.
2647
2648
2648 batchbegin
2649 batchbegin
2649 ----------
2650 ----------
2650
2651
2651 Instruct the peer to begin a batched send.
2652 Instruct the peer to begin a batched send.
2652
2653
2653 All ``command`` blocks are queued for execution until the next
2654 All ``command`` blocks are queued for execution until the next
2654 ``batchsubmit`` block.
2655 ``batchsubmit`` block.
2655
2656
2656 batchsubmit
2657 batchsubmit
2657 -----------
2658 -----------
2658
2659
2659 Submit previously queued ``command`` blocks as a batch request.
2660 Submit previously queued ``command`` blocks as a batch request.
2660
2661
2661 This action MUST be paired with a ``batchbegin`` action.
2662 This action MUST be paired with a ``batchbegin`` action.
2662
2663
2663 close
2664 close
2664 -----
2665 -----
2665
2666
2666 Close the connection to the server.
2667 Close the connection to the server.
2667
2668
2668 flush
2669 flush
2669 -----
2670 -----
2670
2671
2671 Flush data written to the server.
2672 Flush data written to the server.
2672
2673
2673 readavailable
2674 readavailable
2674 -------------
2675 -------------
2675
2676
2676 Read all available data from the server.
2677 Read all available data from the server.
2677
2678
2678 If the connection to the server encompasses multiple pipes, we poll each
2679 If the connection to the server encompasses multiple pipes, we poll each
2679 pipe and read the available data.
2680 pipe and read the available data.
2680
2681
2681 readline
2682 readline
2682 --------
2683 --------
2683
2684
2684 Read a line of output from the server. If there are multiple output
2685 Read a line of output from the server. If there are multiple output
2685 pipes, reads only the main pipe.
2686 pipes, reads only the main pipe.
2686 """
2687 """
2687 opts = pycompat.byteskwargs(opts)
2688 opts = pycompat.byteskwargs(opts)
2688
2689
2689 if opts['localssh'] and not repo:
2690 if opts['localssh'] and not repo:
2690 raise error.Abort(_('--localssh requires a repository'))
2691 raise error.Abort(_('--localssh requires a repository'))
2691
2692
2692 if opts['peer'] and opts['peer'] not in ('raw', 'ssh1', 'ssh2'):
2693 if opts['peer'] and opts['peer'] not in ('raw', 'ssh1', 'ssh2'):
2693 raise error.Abort(_('invalid value for --peer'),
2694 raise error.Abort(_('invalid value for --peer'),
2694 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
2695 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
2695
2696
2696 if ui.interactive():
2697 if ui.interactive():
2697 ui.write(_('(waiting for commands on stdin)\n'))
2698 ui.write(_('(waiting for commands on stdin)\n'))
2698
2699
2699 blocks = list(_parsewirelangblocks(ui.fin))
2700 blocks = list(_parsewirelangblocks(ui.fin))
2700
2701
2701 proc = None
2702 proc = None
2702
2703
2703 if opts['localssh']:
2704 if opts['localssh']:
2704 # We start the SSH server in its own process so there is process
2705 # We start the SSH server in its own process so there is process
2705 # separation. This prevents a whole class of potential bugs around
2706 # separation. This prevents a whole class of potential bugs around
2706 # shared state from interfering with server operation.
2707 # shared state from interfering with server operation.
2707 args = util.hgcmd() + [
2708 args = util.hgcmd() + [
2708 '-R', repo.root,
2709 '-R', repo.root,
2709 'debugserve', '--sshstdio',
2710 'debugserve', '--sshstdio',
2710 ]
2711 ]
2711 proc = subprocess.Popen(args, stdin=subprocess.PIPE,
2712 proc = subprocess.Popen(args, stdin=subprocess.PIPE,
2712 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2713 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2713 bufsize=0)
2714 bufsize=0)
2714
2715
2715 stdin = proc.stdin
2716 stdin = proc.stdin
2716 stdout = proc.stdout
2717 stdout = proc.stdout
2717 stderr = proc.stderr
2718 stderr = proc.stderr
2718
2719
2719 # We turn the pipes into observers so we can log I/O.
2720 # We turn the pipes into observers so we can log I/O.
2720 if ui.verbose or opts['peer'] == 'raw':
2721 if ui.verbose or opts['peer'] == 'raw':
2721 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
2722 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
2722 logdata=True)
2723 logdata=True)
2723 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
2724 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
2724 logdata=True)
2725 logdata=True)
2725 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
2726 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
2726 logdata=True)
2727 logdata=True)
2727
2728
2728 # --localssh also implies the peer connection settings.
2729 # --localssh also implies the peer connection settings.
2729
2730
2730 url = 'ssh://localserver'
2731 url = 'ssh://localserver'
2731 autoreadstderr = not opts['noreadstderr']
2732 autoreadstderr = not opts['noreadstderr']
2732
2733
2733 if opts['peer'] == 'ssh1':
2734 if opts['peer'] == 'ssh1':
2734 ui.write(_('creating ssh peer for wire protocol version 1\n'))
2735 ui.write(_('creating ssh peer for wire protocol version 1\n'))
2735 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
2736 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
2736 None, autoreadstderr=autoreadstderr)
2737 None, autoreadstderr=autoreadstderr)
2737 elif opts['peer'] == 'ssh2':
2738 elif opts['peer'] == 'ssh2':
2738 ui.write(_('creating ssh peer for wire protocol version 2\n'))
2739 ui.write(_('creating ssh peer for wire protocol version 2\n'))
2739 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
2740 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
2740 None, autoreadstderr=autoreadstderr)
2741 None, autoreadstderr=autoreadstderr)
2741 elif opts['peer'] == 'raw':
2742 elif opts['peer'] == 'raw':
2742 ui.write(_('using raw connection to peer\n'))
2743 ui.write(_('using raw connection to peer\n'))
2743 peer = None
2744 peer = None
2744 else:
2745 else:
2745 ui.write(_('creating ssh peer from handshake results\n'))
2746 ui.write(_('creating ssh peer from handshake results\n'))
2746 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
2747 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
2747 autoreadstderr=autoreadstderr)
2748 autoreadstderr=autoreadstderr)
2748
2749
2749 else:
2750 else:
2750 raise error.Abort(_('only --localssh is currently supported'))
2751 raise error.Abort(_('only --localssh is currently supported'))
2751
2752
2752 batchedcommands = None
2753 batchedcommands = None
2753
2754
2754 # Now perform actions based on the parsed wire language instructions.
2755 # Now perform actions based on the parsed wire language instructions.
2755 for action, lines in blocks:
2756 for action, lines in blocks:
2756 if action in ('raw', 'raw+'):
2757 if action in ('raw', 'raw+'):
2757 # Concatenate the data together.
2758 # Concatenate the data together.
2758 data = ''.join(l.lstrip() for l in lines)
2759 data = ''.join(l.lstrip() for l in lines)
2759 data = util.unescapestr(data)
2760 data = util.unescapestr(data)
2760 stdin.write(data)
2761 stdin.write(data)
2761
2762
2762 if action == 'raw+':
2763 if action == 'raw+':
2763 stdin.flush()
2764 stdin.flush()
2764 elif action == 'flush':
2765 elif action == 'flush':
2765 stdin.flush()
2766 stdin.flush()
2766 elif action.startswith('command'):
2767 elif action.startswith('command'):
2767 if not peer:
2768 if not peer:
2768 raise error.Abort(_('cannot send commands unless peer instance '
2769 raise error.Abort(_('cannot send commands unless peer instance '
2769 'is available'))
2770 'is available'))
2770
2771
2771 command = action.split(' ', 1)[1]
2772 command = action.split(' ', 1)[1]
2772
2773
2773 args = {}
2774 args = {}
2774 for line in lines:
2775 for line in lines:
2775 # We need to allow empty values.
2776 # We need to allow empty values.
2776 fields = line.lstrip().split(' ', 1)
2777 fields = line.lstrip().split(' ', 1)
2777 if len(fields) == 1:
2778 if len(fields) == 1:
2778 key = fields[0]
2779 key = fields[0]
2779 value = ''
2780 value = ''
2780 else:
2781 else:
2781 key, value = fields
2782 key, value = fields
2782
2783
2783 args[key] = util.unescapestr(value)
2784 args[key] = util.unescapestr(value)
2784
2785
2785 if batchedcommands is not None:
2786 if batchedcommands is not None:
2786 batchedcommands.append((command, args))
2787 batchedcommands.append((command, args))
2787 continue
2788 continue
2788
2789
2789 ui.status(_('sending %s command\n') % command)
2790 ui.status(_('sending %s command\n') % command)
2790
2791
2791 if 'PUSHFILE' in args:
2792 if 'PUSHFILE' in args:
2792 with open(args['PUSHFILE'], r'rb') as fh:
2793 with open(args['PUSHFILE'], r'rb') as fh:
2793 del args['PUSHFILE']
2794 del args['PUSHFILE']
2794 res, output = peer._callpush(command, fh,
2795 res, output = peer._callpush(command, fh,
2795 **pycompat.strkwargs(args))
2796 **pycompat.strkwargs(args))
2796 ui.status(_('result: %s\n') % util.escapedata(res))
2797 ui.status(_('result: %s\n') % util.escapedata(res))
2797 ui.status(_('remote output: %s\n') %
2798 ui.status(_('remote output: %s\n') %
2798 util.escapedata(output))
2799 util.escapedata(output))
2799 else:
2800 else:
2800 res = peer._call(command, **pycompat.strkwargs(args))
2801 res = peer._call(command, **pycompat.strkwargs(args))
2801 ui.status(_('response: %s\n') % util.escapedata(res))
2802 ui.status(_('response: %s\n') % util.escapedata(res))
2802
2803
2803 elif action == 'batchbegin':
2804 elif action == 'batchbegin':
2804 if batchedcommands is not None:
2805 if batchedcommands is not None:
2805 raise error.Abort(_('nested batchbegin not allowed'))
2806 raise error.Abort(_('nested batchbegin not allowed'))
2806
2807
2807 batchedcommands = []
2808 batchedcommands = []
2808 elif action == 'batchsubmit':
2809 elif action == 'batchsubmit':
2809 # There is a batching API we could go through. But it would be
2810 # There is a batching API we could go through. But it would be
2810 # difficult to normalize requests into function calls. It is easier
2811 # difficult to normalize requests into function calls. It is easier
2811 # to bypass this layer and normalize to commands + args.
2812 # to bypass this layer and normalize to commands + args.
2812 ui.status(_('sending batch with %d sub-commands\n') %
2813 ui.status(_('sending batch with %d sub-commands\n') %
2813 len(batchedcommands))
2814 len(batchedcommands))
2814 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
2815 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
2815 ui.status(_('response #%d: %s\n') % (i, util.escapedata(chunk)))
2816 ui.status(_('response #%d: %s\n') % (i, util.escapedata(chunk)))
2816
2817
2817 batchedcommands = None
2818 batchedcommands = None
2818 elif action == 'close':
2819 elif action == 'close':
2819 peer.close()
2820 peer.close()
2820 elif action == 'readavailable':
2821 elif action == 'readavailable':
2821 fds = [stdout.fileno(), stderr.fileno()]
2822 fds = [stdout.fileno(), stderr.fileno()]
2822 try:
2823 try:
2823 act = util.poll(fds)
2824 act = util.poll(fds)
2824 except NotImplementedError:
2825 except NotImplementedError:
2825 # not-yet-supported case; assume all pipes have data.
2826 # not-yet-supported case; assume all pipes have data.
2826 act = fds
2827 act = fds
2827
2828
2828 if stdout.fileno() in act:
2829 if stdout.fileno() in act:
2829 util.readpipe(stdout)
2830 util.readpipe(stdout)
2830 if stderr.fileno() in act:
2831 if stderr.fileno() in act:
2831 util.readpipe(stderr)
2832 util.readpipe(stderr)
2832 elif action == 'readline':
2833 elif action == 'readline':
2833 stdout.readline()
2834 stdout.readline()
2834 else:
2835 else:
2835 raise error.Abort(_('unknown action: %s') % action)
2836 raise error.Abort(_('unknown action: %s') % action)
2836
2837
2837 if batchedcommands is not None:
2838 if batchedcommands is not None:
2838 raise error.Abort(_('unclosed "batchbegin" request'))
2839 raise error.Abort(_('unclosed "batchbegin" request'))
2839
2840
2840 if peer:
2841 if peer:
2841 peer.close()
2842 peer.close()
2842
2843
2843 if proc:
2844 if proc:
2844 proc.kill()
2845 proc.kill()
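Taken together, the documented actions compose into small stdin scripts. A plausible session against a local server might batch two listkeys requests and then close; the bookmarks and phases namespaces are standard, but what the server accepts ultimately depends on its capabilities::

    $ hg debugwireproto --localssh <<EOF
    batchbegin
    command listkeys
        namespace bookmarks
    command listkeys
        namespace phases
    batchsubmit
    close
    EOF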
@@ -1,1488 +1,1489 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 match as matchmod,
21 match as matchmod,
22 pathutil,
22 pathutil,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 txnutil,
26 txnutil,
27 util,
27 util,
28 )
28 )
29
29
30 parsers = policy.importmod(r'parsers')
30 parsers = policy.importmod(r'parsers')
31
31
32 propertycache = util.propertycache
32 propertycache = util.propertycache
33 filecache = scmutil.filecache
33 filecache = scmutil.filecache
34 _rangemask = 0x7fffffff
34 _rangemask = 0x7fffffff
35
35
36 dirstatetuple = parsers.dirstatetuple
36 dirstatetuple = parsers.dirstatetuple
37
37
38 class repocache(filecache):
38 class repocache(filecache):
39 """filecache for files in .hg/"""
39 """filecache for files in .hg/"""
40 def join(self, obj, fname):
40 def join(self, obj, fname):
41 return obj._opener.join(fname)
41 return obj._opener.join(fname)
42
42
43 class rootcache(filecache):
43 class rootcache(filecache):
44 """filecache for files in the repository root"""
44 """filecache for files in the repository root"""
45 def join(self, obj, fname):
45 def join(self, obj, fname):
46 return obj._join(fname)
46 return obj._join(fname)
47
47
48 def _getfsnow(vfs):
48 def _getfsnow(vfs):
49 '''Get "now" timestamp on filesystem'''
49 '''Get "now" timestamp on filesystem'''
50 tmpfd, tmpname = vfs.mkstemp()
50 tmpfd, tmpname = vfs.mkstemp()
51 try:
51 try:
52 return os.fstat(tmpfd).st_mtime
52 return os.fstat(tmpfd)[stat.ST_MTIME]
53 finally:
53 finally:
54 os.close(tmpfd)
54 os.close(tmpfd)
55 vfs.unlink(tmpname)
55 vfs.unlink(tmpname)
56
56
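The hunk above is the point of this cleanup: stat_result.st_mtime is a float of seconds (possibly with a fractional part), while indexing the result with stat.ST_MTIME yields the integer timestamp. The integer form is presumably preferred here because dirstate stores and compares whole-second mtimes, so a float would invite rounding mismatches. The difference in plain Python::

    import os
    import stat

    st = os.stat('.')
    print(type(st.st_mtime))        # <class 'float'>
    print(type(st[stat.ST_MTIME]))  # <class 'int'>
    # For post-epoch timestamps the indexed form equals the truncated float:
    print(st[stat.ST_MTIME] == int(st.st_mtime))  # True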
class dirstate(object):

    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap

    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

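    # A minimal usage sketch (hypothetical caller; 'repo' and 'newnode' are
    # assumptions, and the working-directory lock must already be held).
    # setparents() below refuses to run outside this context manager:
    #
    #     with repo.dirstate.parentchange():
    #         repo.dirstate.setparents(newnode)
    #
    # A normal exit decrements _parentwriters; an exception skips the
    # decrement, so the incoherent dirstate is not written when wlock drops.
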
    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(self._ui, self._opener, self._root)
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache('branch')
    def _branch(self):
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache('.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never(self._root, '')

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback

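    # The function returned by flagfunc() maps a tracked path to its flag
    # string: 'l' for a symlink, 'x' for an executable, '' otherwise. On
    # filesystems lacking symlink or exec-bit support, the missing half of
    # the answer comes from the fallback (typically flags recorded in a
    # parent manifest), e.g. f('bin/script') -> 'x'.
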
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config('ui', 'forcecwd')
        if forcecwd:
            return forcecwd
        return pycompat.getcwd()

    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

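    # Illustrative lookups (hypothetical repository): right after "hg add
    # new.py", repo.dirstate['new.py'] returns 'a'; a never-seen path returns
    # the '?' default supplied by self._map.get(key, ("?",))[0] above.
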
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return self._map.iteritems()

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            candidatefiles = self._map.nonnormalset.union(
                self._map.otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies

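    # Each dirstate entry unpacks as (state, mode, size, mtime). Two size
    # values are sentinels rather than real sizes: -1 for an entry that was
    # in state 'm' (merged) and -2 for one that came from the other parent;
    # the s[0] == 'm' and s[2] == -2 tests above read exactly those markers
    # to recover copy records when a merge parent is dropped.
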
    def setbranch(self, branch):
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise

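    # The checkambig=True open plus the explicit filecache refresh above keep
    # the cached stat for '_branch' in sync with the file that atomically
    # replaced '.hg/branch'; without the refresh, the next read could serve a
    # stale cached value or needlessly re-read the file.
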
    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        for a in (r"_map", r"_branch", r"_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    def _addpath(self, f, state, mode, size, mtime):
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
-        mtime = s.st_mtime
+        mtime = s[stat.ST_MTIME]
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

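    # _rangemask (0x7fffffff) truncates sizes and mtimes to 31 bits so they
    # fit the 32-bit fields of the on-disk dirstate format. Illustrative
    # arithmetic:
    #
    #     0x123456789a & 0x7fffffff == 0x3456789a
    #
    # which is why the comparisons in status() below check both the raw value
    # and the masked value.
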
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == 'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                    return
        self._addpath(f, 'n', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)
        self._map.copymap.pop(f, None)

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state
                if entry[0] == 'm': # merge
                    size = -1
                elif entry[0] == 'n' and entry[2] == -2: # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            self._map.copymap.pop(f, None)

    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == nullid:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        oldstate = self[f]
        if self._map.dropfile(f, oldstate):
            self._dirty = True
            self._updatedfiles.add(f)
            self._map.copymap.pop(f, None)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._map.filefoldmap)
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._map.dirfoldmap)
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

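    # Example on a case-insensitive filesystem: if the dirstate tracks
    # 'README.txt' and a command is given 'readme.TXT', normalize() returns
    # 'README.txt' -- the dirstate spelling wins, then the on-disk spelling,
    # and only then the argument as provided, per the precedence list above.
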
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True

    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # Delayed writing re-raises the "ambiguous timestamp" issue.
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
-        now = util.fstat(st).st_mtime & _rangemask
+        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

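    # Worked example of the delay loop above (hypothetical numbers): with
    # debug.dirstate.delaywrite = 2 and time.time() = 105.3, start =
    # 105 - (105 % 2) = 104 and end = 106, so the write sleeps 0.7 seconds.
    # 'now' then becomes 106, so no entry stamped during second 105 can
    # compare equal to it.
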
    def _dirignore(self, f):
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in util.finddirs(f):
            if self._ignore(p):
                return True
        return False

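    # Example: _dirignore('a/b/c') returns True if 'a/b/c' itself or any
    # ancestor directory ('a/b', then 'a') matches the ignore patterns, so
    # the contents of an ignored directory are treated as ignored without
    # consulting each file.
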
    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join('.hgignore')):
            files.append(self._join('.hgignore'))
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")

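    # Sketch of the breadth-first scan above (hypothetical ignore files):
    # plain patterns are matched against f in order, while "subinclude:"
    # entries enqueue the referenced pattern file, e.g.
    #
    #     .hgignore:        subinclude:ui/.hgignore
    #     ui/.hgignore:     glob:*.tmp
    #
    # _ignorefileandline('ui/scratch.tmp') would then report the file, line
    # number, and pattern line that first matched.
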
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if self._map.hasdir(nf):
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == '.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._map.dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

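    # In the returned triple, results uses None as a sentinel value: '.hg',
    # subrepo roots, and names that exist only in the dirstate map to None,
    # while real files map to their lstat result. The callers below rely on
    # that distinction.
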
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                if not match.visitdir(nd):
                    continue
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd),
                                  encoding.strtolocal(inst.strerror))
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

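    # The mapping returned by walk() is exactly what status() consumes below:
    # filename -> stat-like object for files found on disk, or None for
    # matched names that are missing, e.g.
    # {'a/b.py': <stat result>, 'a/gone.py': None}.
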
996 def status(self, match, subrepos, ignored, clean, unknown):
996 def status(self, match, subrepos, ignored, clean, unknown):
997 '''Determine the status of the working copy relative to the
997 '''Determine the status of the working copy relative to the
998 dirstate and return a pair of (unsure, status), where status is of type
998 dirstate and return a pair of (unsure, status), where status is of type
999 scmutil.status and:
999 scmutil.status and:
1000
1000
1001 unsure:
1001 unsure:
1002 files that might have been modified since the dirstate was
1002 files that might have been modified since the dirstate was
1003 written, but need to be read to be sure (size is the same
1003 written, but need to be read to be sure (size is the same
1004 but mtime differs)
1004 but mtime differs)
1005 status.modified:
1005 status.modified:
1006 files that have definitely been modified since the dirstate
1006 files that have definitely been modified since the dirstate
1007 was written (different size or mode)
1007 was written (different size or mode)
1008 status.clean:
1008 status.clean:
1009 files that have definitely not been modified since the
1009 files that have definitely not been modified since the
1010 dirstate was written
1010 dirstate was written
1011 '''
1011 '''
1012 listignored, listclean, listunknown = ignored, clean, unknown
1012 listignored, listclean, listunknown = ignored, clean, unknown
1013 lookup, modified, added, unknown, ignored = [], [], [], [], []
1013 lookup, modified, added, unknown, ignored = [], [], [], [], []
1014 removed, deleted, clean = [], [], []
1014 removed, deleted, clean = [], [], []
1015
1015
1016 dmap = self._map
1016 dmap = self._map
1017 dmap.preload()
1017 dmap.preload()
1018 dcontains = dmap.__contains__
1018 dcontains = dmap.__contains__
1019 dget = dmap.__getitem__
1019 dget = dmap.__getitem__
1020 ladd = lookup.append # aka "unsure"
1020 ladd = lookup.append # aka "unsure"
1021 madd = modified.append
1021 madd = modified.append
1022 aadd = added.append
1022 aadd = added.append
1023 uadd = unknown.append
1023 uadd = unknown.append
1024 iadd = ignored.append
1024 iadd = ignored.append
1025 radd = removed.append
1025 radd = removed.append
1026 dadd = deleted.append
1026 dadd = deleted.append
1027 cadd = clean.append
1027 cadd = clean.append
1028 mexact = match.exact
1028 mexact = match.exact
1029 dirignore = self._dirignore
1029 dirignore = self._dirignore
1030 checkexec = self._checkexec
1030 checkexec = self._checkexec
1031 copymap = self._map.copymap
1031 copymap = self._map.copymap
1032 lastnormaltime = self._lastnormaltime
1032 lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
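                # _rangemask (0x7fffffff, defined near the top of this module)
                # truncates values to the 31 bits a dirstate entry can store,
                # so size and mtime are compared both exactly and masked.
                # Indexing st[stat.ST_MTIME] rather than reading st.st_mtime
                # is presumably deliberate: the attribute is a float in
                # Python, while the indexed form is an integer like the
                # mtime recorded in the dirstate.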
                elif (time != st[stat.ST_MTIME]
                      and time != st[stat.ST_MTIME] & _rangemask):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' so that changes are always
        # written out; the latter skips writing while a transaction is
        # running. The file written here is then used to create the backup
        # of the dirstate.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)

class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = 'dirstate'

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        self._map.clear()
        self.copymap.clear()
        self.setparents(nullid, nullid)
        util.clearcachedproperty(self, "_dirs")
        util.clearcachedproperty(self, "_alldirs")
        util.clearcachedproperty(self, "filefoldmap")
        util.clearcachedproperty(self, "dirfoldmap")
        util.clearcachedproperty(self, "nonnormalset")
        util.clearcachedproperty(self, "otherparentset")

    def items(self):
        return self._map.iteritems()

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        if oldstate in "?r" and r"_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == "?" and r"_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)
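        # a size of -2 is the sentinel for "content comes from the other
        # parent of a merge"; see removefile below for more on sentinel sizes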

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in "?r" and r"_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == "?" and r"_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if r"filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate. Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != "r" and r"_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if r"_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if r"filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
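        # An entry whose recorded mtime equals 'now' is ambiguous: the file
        # could be modified again within the same second without the mtime
        # changing. Recording -1 instead forces a real content comparison
        # ("lookup") the next time status runs.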
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == 'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
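            # The C parsers module provides the fast path above; pure-Python
            # builds (or older C modules) lack the attribute, so fall back to
            # scanning the map here.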
            nonnorm = set()
            otherparent = set()
            for fname, e in self._map.iteritems():
                if e[0] != 'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == 'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return util.dirs(self._map, 'r')

    @propertycache
    def _alldirs(self):
        return util.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed in parallel'))
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(40)
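                # the dirstate file starts with the two working directory
                # parents: a pair of 20-byte binary nodes, hence 40 bytes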
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = ''

            l = len(st)
            if l == 40:
                self._parents = st[:20], st[20:40]
            elif l == 0:
                self._parents = [nullid, nullid]
            else:
                raise error.Abort(_('working directory state appears '
                                    'damaged!'))

        return self._parents

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename))

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) // 71)
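            # (71 is presumably 85 with that ~20% headroom applied:
            # 85 / 1.2 is about 71, so len(st) // 71 yields roughly 20%
            # more slots than the len(st) / 85 point estimate.)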

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

    def write(self, st, now):
        st.write(parsers.pack_dirstate(self._map, self.copymap,
                                       self.parents(), now))
        st.close()
        self._dirtyparents = False
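        # pack_dirstate mutates entries whose mtime equals 'now' (storing -1
        # to defuse same-second races), so the cached views are recomputed
        # from the possibly-updated map below.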
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    @propertycache
    def identity(self):
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f

@@ -1,1135 +1,1136 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
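# Example (for illustration): parseurl(b'https://example.org/repo#stable')
# should return (b'https://example.org/repo', (b'stable', [])), since the
# fragment is split off as the branch and no branch list was passed in.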

schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
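# Each value above is either a module exposing instance(ui, path, create)
# (plus islocal() for local schemes), or a callable such as _local that
# resolves a path to one of those modules; _peerlookup below handles both.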

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=util.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)
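    # That is essentially the whole trick to sharing: 'requires' gains the
    # shared (or relshared) requirement and '.hg/sharedpath' records where
    # the source's .hg directory lives; the store stays with the source.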

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from srcrepo's store to destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
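                # copyfiles reports back whether hardlinking worked; feeding
                # the flag into the next iteration keeps the copied store
                # consistent (either all hardlinks or all real copies).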
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
469
470
470 # Recomputing branch cache might be slow on big repos,
471 # Recomputing branch cache might be slow on big repos,
471 # so just copy it
472 # so just copy it
472 def _copycache(srcrepo, dstcachedir, fname):
473 def _copycache(srcrepo, dstcachedir, fname):
473 """copy a cache from srcrepo to destcachedir (if it exists)"""
474 """copy a cache from srcrepo to destcachedir (if it exists)"""
474 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
475 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
475 dstbranchcache = os.path.join(dstcachedir, fname)
476 dstbranchcache = os.path.join(dstcachedir, fname)
476 if os.path.exists(srcbranchcache):
477 if os.path.exists(srcbranchcache):
477 if not os.path.exists(dstcachedir):
478 if not os.path.exists(dstcachedir):
478 os.mkdir(dstcachedir)
479 os.mkdir(dstcachedir)
479 util.copyfile(srcbranchcache, dstbranchcache)
480 util.copyfile(srcbranchcache, dstbranchcache)
480
481
481 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
482 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
482 update=True, stream=False, branch=None, shareopts=None):
483 update=True, stream=False, branch=None, shareopts=None):
483 """Make a copy of an existing repository.
484 """Make a copy of an existing repository.
484
485
485 Create a copy of an existing repository in a new directory. The
486 Create a copy of an existing repository in a new directory. The
486 source and destination are URLs, as passed to the repository
487 source and destination are URLs, as passed to the repository
487 function. Returns a pair of repository peers, the source and
488 function. Returns a pair of repository peers, the source and
488 newly created destination.
489 newly created destination.
489
490
490 The location of the source is added to the new repository's
491 The location of the source is added to the new repository's
491 .hg/hgrc file, as the default to be used for future pulls and
492 .hg/hgrc file, as the default to be used for future pulls and
492 pushes.
493 pushes.
493
494
494 If an exception is raised, the partly cloned/updated destination
495 If an exception is raised, the partly cloned/updated destination
495 repository will be deleted.
496 repository will be deleted.
496
497
497 Arguments:
498 Arguments:
498
499
499 source: repository object or URL
500 source: repository object or URL
500
501
501 dest: URL of destination repository to create (defaults to base
502 dest: URL of destination repository to create (defaults to base
502 name of source repository)
503 name of source repository)
503
504
504 pull: always pull from source repository, even in local case or if the
505 pull: always pull from source repository, even in local case or if the
505 server prefers streaming
506 server prefers streaming
506
507
507 stream: stream raw data uncompressed from repository (fast over
508 stream: stream raw data uncompressed from repository (fast over
508 LAN, slow over WAN)
509 LAN, slow over WAN)
509
510
510 rev: revision to clone up to (implies pull=True)
511 rev: revision to clone up to (implies pull=True)
511
512
512 update: update working directory after clone completes, if
513 update: update working directory after clone completes, if
513 destination is local repository (True means update to default rev,
514 destination is local repository (True means update to default rev,
514 anything else is treated as a revision)
515 anything else is treated as a revision)
515
516
516 branch: branches to clone
517 branch: branches to clone
517
518
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 activates auto sharing mode and defines the directory for stores. The
520 activates auto sharing mode and defines the directory for stores. The
520 "mode" key determines how to construct the directory name of the shared
521 "mode" key determines how to construct the directory name of the shared
521 repository. "identity" means the name is derived from the node of the first
522 repository. "identity" means the name is derived from the node of the first
522 changeset in the repository. "remote" means the name is derived from the
523 changeset in the repository. "remote" means the name is derived from the
523 remote's path/URL. Defaults to "identity."
524 remote's path/URL. Defaults to "identity."
524 """
525 """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

        revs = None
        if rev:
            if not srcpeer.capable('lookup'):
                raise error.Abort(_("src repository does not support "
                                    "revision lookup and so doesn't "
                                    "support clone by revision"))
            revs = [srcpeer.lookup(r) for r in rev]
            checkout = revs[0]
        local = destpeer.local()
        if local:
            u = util.url(abspath)
            defaulturl = bytes(u)
            local.ui.setconfig('paths', 'default', defaulturl, 'clone')
            if not stream:
                if pull:
                    stream = False
                else:
                    stream = None
            # internal config: ui.quietbookmarkmove
            overrides = {('ui', 'quietbookmarkmove'): True}
            with local.ui.configoverride(overrides, 'clone'):
                exchange.pull(local, srcpeer, revs,
                              streamclonerequested=stream)
        elif srcrepo:
            exchange.push(srcrepo, destpeer, revs=revs,
                          bookmarks=srcrepo._bookmarks.keys())
        else:
            raise error.Abort(_("clone from remote to remote not supported")
                              )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
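
# Update-target selection in clone() above, in order of precedence: an
# explicit revision or branch checkout, then the '@' bookmark, then the tip
# of the 'default' branch, then 'tip'.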

def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0
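
# The abort=True branch above is what backs 'hg merge --abort': it updates
# back to the local side recorded in the merge state, or to '.' when no
# merge state was written.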

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
-            state.append((st.st_mtime, st.st_size))
+            state.append((st[stat.ST_MTIME], st.st_size))
-            maxmtime = max(maxmtime, st.st_mtime)
+            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime
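    # Why _repostate() reads st[stat.ST_MTIME] rather than st.st_mtime:
    # tuple-style access to os.stat_result always yields the integer
    # timestamp, while the attribute is a float. A standalone sketch
    # (the stat'ed path is arbitrary):
    #
    #   import os, stat
    #   st = os.stat('.')
    #   st.st_mtime          # float, e.g. 1520013892.846723
    #   st[stat.ST_MTIME]    # int,   e.g. 1520013892
    #
    # Integer mtimes keep the state tuples stably comparable across runs
    # and keep maxmtime an int.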

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,249 +1,250 @@
# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import base64
import errno
import mimetypes
import os
+import stat

from .. import (
    encoding,
    pycompat,
    util,
)

httpserver = util.httpserver

HTTP_OK = 200
HTTP_NOT_MODIFIED = 304
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_SERVER_ERROR = 500


def ismember(ui, username, userlist):
    """Check if username is a member of userlist.

    If userlist has a single '*' member, all users are considered members.
    Can be overridden by extensions to provide more complex authorization
    schemes.
    """
    return userlist == ['*'] or username in userlist

def checkauthz(hgweb, req, op):
    '''Check permission for operation based on request data (including
    authentication info). Return if op allowed, else raise an ErrorResponse
    exception.'''

    user = req.env.get(r'REMOTE_USER')

    deny_read = hgweb.configlist('web', 'deny_read')
    if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')

    allow_read = hgweb.configlist('web', 'allow_read')
    if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')

    if op == 'pull' and not hgweb.allowpull:
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
    elif op == 'pull' or op is None: # op is None for interface requests
        return

    # enforce that you can only push using POST requests
    if req.env[r'REQUEST_METHOD'] != r'POST':
        msg = 'push requires POST request'
        raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)

    # require ssl by default for pushing, auth info cannot be sniffed
    # and replayed
    scheme = req.env.get('wsgi.url_scheme')
    if hgweb.configbool('web', 'push_ssl') and scheme != 'https':
        raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')

    deny = hgweb.configlist('web', 'deny_push')
    if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')

    allow = hgweb.configlist('web', 'allow-push')
    if not (allow and ismember(hgweb.repo.ui, user, allow)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
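
# The checks above are driven by the standard hgweb access knobs; a sketch
# of a matching repository hgrc (the user names are examples):
#
#   [web]
#   allow_read = *
#   allow-push = alice, bob
#   push_ssl = True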

# Hooks for hgweb permission checks; extensions can add hooks here.
# Each hook is invoked like this: hook(hgweb, request, operation),
# where operation is either read, pull or push. Hooks should either
# raise an ErrorResponse exception, or just return.
#
# It is possible to do both authentication and authorization through
# this.
permhooks = [checkauthz]


class ErrorResponse(Exception):
    def __init__(self, code, message=None, headers=None):
        if message is None:
            message = _statusmessage(code)
        Exception.__init__(self, pycompat.sysstr(message))
        self.code = code
        if headers is None:
            headers = []
        self.headers = headers

class continuereader(object):
    def __init__(self, f, write):
        self.f = f
        self._write = write
        self.continued = False

    def read(self, amt=-1):
        if not self.continued:
            self.continued = True
            self._write('HTTP/1.1 100 Continue\r\n\r\n')
        return self.f.read(amt)

    def __getattr__(self, attr):
        if attr in ('close', 'readline', 'readlines', '__iter__'):
            return getattr(self.f, attr)
        raise AttributeError

def _statusmessage(code):
    responses = httpserver.basehttprequesthandler.responses
    return responses.get(code, ('Error', 'Unknown error'))[0]

def statusmessage(code, message=None):
    return '%d %s' % (code, message or _statusmessage(code))

def get_stat(spath, fn):
    """stat fn if it exists, spath otherwise"""
    cl_path = os.path.join(spath, fn)
    if os.path.exists(cl_path):
        return os.stat(cl_path)
    else:
        return os.stat(spath)

def get_mtime(spath):
-    return get_stat(spath, "00changelog.i").st_mtime
+    return get_stat(spath, "00changelog.i")[stat.ST_MTIME]
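
# caching() below formats this value with r'W/"%d"' to build an ETag, so
# returning the integer seconds from the stat tuple keeps the tag a stable
# integer instead of depending on the float st_mtime attribute.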

def ispathsafe(path):
    """Determine if a path is safe to use for filesystem access."""
    parts = path.split('/')
    for part in parts:
        if (part in ('', pycompat.oscurdir, pycompat.ospardir) or
            pycompat.ossep in part or
            pycompat.osaltsep is not None and pycompat.osaltsep in part):
            return False

    return True
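
# Quick examples of the predicate above (POSIX separators assumed):
#
#   ispathsafe('static/hgicon.png')  -> True
#   ispathsafe('../.hg/hgrc')        -> False  ('..' component)
#   ispathsafe('a//b')               -> False  (empty component)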

def staticfile(directory, fname, req):
    """return a file inside directory with guessed Content-Type header

    fname always uses '/' as directory separator and isn't allowed to
    contain unusual path components.
    Content-Type is guessed using the mimetypes module.
    Return an empty string if fname is illegal or file not found.

    """
    if not ispathsafe(fname):
        return

    fpath = os.path.join(*fname.split('/'))
    if isinstance(directory, str):
        directory = [directory]
    for d in directory:
        path = os.path.join(d, fpath)
        if os.path.exists(path):
            break
    try:
        os.stat(path)
        ct = mimetypes.guess_type(pycompat.fsdecode(path))[0] or "text/plain"
        with open(path, 'rb') as fh:
            data = fh.read()

        req.respond(HTTP_OK, ct, body=data)
    except TypeError:
        raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
    except OSError as err:
        if err.errno == errno.ENOENT:
            raise ErrorResponse(HTTP_NOT_FOUND)
        else:
            raise ErrorResponse(HTTP_SERVER_ERROR,
                                encoding.strtolocal(err.strerror))

def paritygen(stripecount, offset=0):
    """count parity of horizontal stripes for easier reading"""
    if stripecount and offset:
        # account for offset, e.g. due to building the list in reverse
        count = (stripecount + offset) % stripecount
        parity = (stripecount + offset) // stripecount & 1
    else:
        count = 0
        parity = 0
    while True:
        yield parity
        count += 1
        if stripecount and count >= stripecount:
            parity = 1 - parity
            count = 0
198
199
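(Editor's aside; an illustrative sketch, not part of the changeset.)
hgweb templates alternate row shading by pulling one parity value per
rendered row:

    from itertools import islice

    assert list(islice(paritygen(3), 9)) == [0, 0, 0, 1, 1, 1, 0, 0, 0]
    assert list(islice(paritygen(0), 4)) == [0, 0, 0, 0]  # 0 disables striping
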
def get_contact(config):
    """Return repo contact information or empty string.

    web.contact is the primary source, but if that is not set, try
    ui.username or $EMAIL as a fallback to display something useful.
    """
    return (config("web", "contact") or
            config("ui", "username") or
            encoding.environ.get("EMAIL") or "")

def caching(web, req):
    tag = r'W/"%d"' % web.mtime
    if req.env.get('HTTP_IF_NONE_MATCH') == tag:
        raise ErrorResponse(HTTP_NOT_MODIFIED)
    req.headers.append(('ETag', tag))

def cspvalues(ui):
    """Obtain the Content-Security-Policy header and nonce value.

    Returns a 2-tuple of the CSP header value and the nonce value.

    First value is ``None`` if CSP isn't enabled. Second value is ``None``
    if CSP isn't enabled or if the CSP header doesn't need a nonce.
    """
    # Without demandimport, "import uuid" could have an immediate side-effect
    # running "ldconfig" on Linux trying to find libuuid.
    # With Python <= 2.7.12, that "ldconfig" is run via a shell and the shell
    # may pollute the terminal with:
    #
    # shell-init: error retrieving current directory: getcwd: cannot access
    # parent directories: No such file or directory
    #
    # Python >= 2.7.13 has fixed it by running "ldconfig" directly without a
    # shell (hg changeset a09ae70f3489).
    #
    # Moved "import uuid" from here so it's executed after we know we have
    # a sane cwd (i.e. after dispatch.py cwd check).
    #
    # We can move it back once we no longer need Python <= 2.7.12 support.
    import uuid

    # Don't allow an untrusted CSP setting, since it could disable protections
    # from a trusted/global source.
    csp = ui.config('web', 'csp', untrusted=False)
    nonce = None

    if csp and '%nonce%' in csp:
        nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip('=')
        csp = csp.replace('%nonce%', nonce)

    return csp, nonce
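
(Editor's aside; an illustrative sketch with a made-up nonce, not part of
the changeset.) Given a configuration such as::

    [web]
    csp = default-src 'self'; script-src 'self' 'nonce-%nonce%'

cspvalues(ui) returns the header text with %nonce% replaced by a fresh
random token, e.g. ("default-src 'self'; script-src 'self'
'nonce-kXm2tPxl...'", 'kXm2tPxl...'); the caller emits the first element
as the Content-Security-Policy response header and stamps the nonce onto
inline <script> tags.
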
@@ -1,696 +1,696 @@
# posix.py - Posix utility function implementations for Mercurial
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import fcntl
import getpass
import grp
import os
import pwd
import re
import select
import stat
import sys
import tempfile
import unicodedata

from .i18n import _
from . import (
    encoding,
    error,
    policy,
    pycompat,
)

osutil = policy.importmod(r'osutil')

posixfile = open
normpath = os.path.normpath
samestat = os.path.samestat
try:
    oslink = os.link
except AttributeError:
    # Some platforms build Python without os.link on systems that are
    # vaguely unix-like but don't have hardlink support. For those
    # poor souls, just say we tried and that it failed so we fall back
    # to copies.
    def oslink(src, dst):
        raise OSError(errno.EINVAL,
                      'hardlinks not supported: %s to %s' % (src, dst))
unlink = os.unlink
rename = os.rename
removedirs = os.removedirs
expandglobs = False

umask = os.umask(0)
os.umask(umask)

def split(p):
    '''Same as posixpath.split, but faster

    >>> import posixpath
    >>> for f in [b'/absolute/path/to/file',
    ...           b'relative/path/to/file',
    ...           b'file_alone',
    ...           b'path/to/directory/',
    ...           b'/multiple/path//separators',
    ...           b'/file_at_root',
    ...           b'///multiple_leading_separators_at_root',
    ...           b'']:
    ...     assert split(f) == posixpath.split(f), f
    '''
    ht = p.rsplit('/', 1)
    if len(ht) == 1:
        return '', p
    nh = ht[0].rstrip('/')
    if nh:
        return nh, ht[1]
    return ht[0] + '/', ht[1]

def openhardlinks():
    '''return true if it is safe to hold open file handles to hardlinks'''
    return True

def nlinks(name):
    '''return number of hardlinks for the given file'''
    return os.lstat(name).st_nlink

def parsepatchoutput(output_line):
    """parses the output produced by patch and returns the filename"""
    pf = output_line[14:]
    if pycompat.sysplatform == 'OpenVMS':
        if pf[0] == '`':
            pf = pf[1:-1] # Remove the quotes
    else:
        if pf.startswith("'") and pf.endswith("'") and " " in pf:
            pf = pf[1:-1] # Remove the quotes
    return pf

def sshargs(sshcmd, host, user, port):
    '''Build argument list for ssh'''
    args = user and ("%s@%s" % (user, host)) or host
    if '-' in args[:1]:
        raise error.Abort(
            _('illegal ssh hostname or username starting with -: %s') % args)
    args = shellquote(args)
    if port:
        args = '-p %s %s' % (shellquote(port), args)
    return args

def isexec(f):
    """check whether a file is executable"""
    return (os.lstat(f).st_mode & 0o100 != 0)

def setflags(f, l, x):
    st = os.lstat(f)
    s = st.st_mode
    if l:
        if not stat.S_ISLNK(s):
            # switch file to link
            fp = open(f, 'rb')
            data = fp.read()
            fp.close()
            unlink(f)
            try:
                os.symlink(data, f)
            except OSError:
                # failed to make a link, rewrite file
                fp = open(f, "wb")
                fp.write(data)
                fp.close()
        # no chmod needed at this point
        return
    if stat.S_ISLNK(s):
        # switch link to file
        data = os.readlink(f)
        unlink(f)
        fp = open(f, "wb")
        fp.write(data)
        fp.close()
        s = 0o666 & ~umask # avoid restatting for chmod

    sx = s & 0o100
    if st.st_nlink > 1 and bool(x) != bool(sx):
        # the file is a hardlink, break it
        with open(f, "rb") as fp:
            data = fp.read()
        unlink(f)
        with open(f, "wb") as fp:
            fp.write(data)

    if x and not sx:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
    elif not x and sx:
        # Turn off all +x bits
        os.chmod(f, s & 0o666)

def copymode(src, dst, mode=None):
    '''Copy the file mode from the file at path src to dst.
    If src doesn't exist, we're using mode instead. If mode is None, we're
    using umask.'''
    try:
        st_mode = os.lstat(src).st_mode & 0o777
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = mode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0o666
    os.chmod(dst, st_mode)

def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        cachedir = os.path.join(path, '.hg', 'cache')
        if os.path.isdir(cachedir):
            checkisexec = os.path.join(cachedir, 'checkisexec')
            checknoexec = os.path.join(cachedir, 'checknoexec')

            try:
                m = os.stat(checkisexec).st_mode
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # checkisexec does not exist - fall through ...
            else:
                # checkisexec exists, check if it actually is exec
                if m & EXECFLAGS != 0:
                    # ensure checkisexec exists, check it isn't exec
                    try:
                        m = os.stat(checknoexec).st_mode
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        open(checknoexec, 'w').close() # might fail
                        m = os.stat(checknoexec).st_mode
                    if m & EXECFLAGS == 0:
                        # check-exec is exec and check-no-exec is not exec
                        return True
                    # checknoexec exists but is exec - delete it
                    unlink(checknoexec)
                # checkisexec exists but is not exec - delete it
                unlink(checkisexec)

            # check using one file, leave it as checkisexec
            checkdir = cachedir
        else:
            # check directly in path and don't leave checkisexec behind
            checkdir = path
            checkisexec = None
        fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode
            if m & EXECFLAGS == 0:
                os.chmod(fn, m & 0o777 | EXECFLAGS)
                if os.stat(fn).st_mode & EXECFLAGS != 0:
                    if checkisexec is not None:
                        os.rename(fn, checkisexec)
                        fn = None
                    return True
        finally:
            if fn is not None:
                unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False

def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    while True:
        cachedir = os.path.join(path, '.hg', 'cache')
        checklink = os.path.join(cachedir, 'checklink')
        # try fast path, read only
        if os.path.islink(checklink):
            return True
        if os.path.isdir(cachedir):
            checkdir = cachedir
        else:
            checkdir = path
            cachedir = None
        fscheckdir = pycompat.fsdecode(checkdir)
        name = tempfile.mktemp(dir=fscheckdir,
                               prefix=r'checklink-')
        name = pycompat.fsencode(name)
        try:
            fd = None
            if cachedir is None:
                fd = tempfile.NamedTemporaryFile(dir=fscheckdir,
                                                 prefix=r'hg-checklink-')
                target = pycompat.fsencode(os.path.basename(fd.name))
            else:
                # create a fixed file to link to; doesn't matter if it
                # already exists.
                target = 'checklink-target'
                try:
                    open(os.path.join(cachedir, target), 'w').close()
                except IOError as inst:
                    if inst[0] == errno.EACCES:
                        # If we can't write to cachedir, just pretend
                        # that the fs is readonly and by association
                        # that the fs won't support symlinks. This
                        # seems like the least dangerous way to avoid
                        # data loss.
                        return False
                    raise
            try:
                os.symlink(target, name)
                if cachedir is None:
                    unlink(name)
                else:
                    try:
                        os.rename(name, checklink)
                    except OSError:
                        unlink(name)
                return True
            except OSError as inst:
                # link creation might race, try again
                if inst[0] == errno.EEXIST:
                    continue
                raise
            finally:
                if fd is not None:
                    fd.close()
        except AttributeError:
            return False
        except OSError as inst:
            # sshfs might report failure while successfully creating the link
            if inst[0] == errno.EIO and os.path.exists(name):
                unlink(name)
            return False

def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    return None # on posix platforms, every path is ok

def getfsmountpoint(dirpath):
    '''Get the filesystem mount point from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)

def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    return getattr(osutil, 'getfstype', lambda x: None)(dirpath)

def setbinary(fd):
    pass

def pconvert(path):
    return path

def localpath(path):
    return path

def samefile(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    return os.path.samefile(fpath1, fpath2)

def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    st1 = os.lstat(fpath1)
    st2 = os.lstat(fpath2)
    return st1.st_dev == st2.st_dev

# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    return path.lower()

# what normcase does to ASCII strings
normcasespec = encoding.normcasespecs.lower
# fallback normcase function for non-ASCII strings
normcasefallback = normcase

if pycompat.isdarwin:

    def normcase(path):
        '''
        Normalize a filename for OS X-compatible comparison:
        - escape-encode invalid characters
        - decompose to NFD
        - lowercase
        - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]

        >>> normcase(b'UPPER')
        'upper'
        >>> normcase(b'Caf\\xc3\\xa9')
        'cafe\\xcc\\x81'
        >>> normcase(b'\\xc3\\x89')
        'e\\xcc\\x81'
        >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918
        '%b8%ca%c3\\xca\\xbe%c8.jpg'
        '''

        try:
            return encoding.asciilower(path)  # exception for non-ASCII
        except UnicodeDecodeError:
            return normcasefallback(path)

    normcasespec = encoding.normcasespecs.lower

    def normcasefallback(path):
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # OS X percent-encodes any bytes that aren't valid utf-8
            s = ''
            pos = 0
            l = len(path)
            while pos < l:
                try:
                    c = encoding.getutf8char(path, pos)
                    pos += len(c)
                except ValueError:
                    c = '%%%02X' % ord(path[pos:pos + 1])
                    pos += 1
                s += c

            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        enc = unicodedata.normalize(r'NFD', u).lower().encode('utf-8')
        # drop HFS+ ignored characters
        return encoding.hfsignoreclean(enc)

if pycompat.sysplatform == 'cygwin':
    # workaround for cygwin, in which mount point part of path is
    # treated as case sensitive, even though underlying NTFS is case
    # insensitive.

    # default mount points
    cygwinmountpoints = sorted([
        "/usr/bin",
        "/usr/lib",
        "/cygdrive",
    ], reverse=True)

    # use uppercasing for normcase, the same as the NTFS workaround
    def normcase(path):
        pathlen = len(path)
        if (pathlen == 0) or (path[0] != pycompat.ossep):
            # treat as relative
            return encoding.upper(path)

        # to preserve case of mountpoint part
        for mp in cygwinmountpoints:
            if not path.startswith(mp):
                continue

            mplen = len(mp)
            if mplen == pathlen: # mount point itself
                return mp
            if path[mplen] == pycompat.ossep:
                return mp + encoding.upper(path[mplen:])

        return encoding.upper(path)

    normcasespec = encoding.normcasespecs.other
    normcasefallback = normcase

    # Cygwin translates native ACLs to POSIX permissions,
    # but these translations are not supported by native
    # tools, so the exec bit tends to be set erroneously.
    # Therefore, disable executable bit access on Cygwin.
    def checkexec(path):
        return False

    # Similarly, Cygwin's symlink emulation is likely to create
    # problems when Mercurial is used from both Cygwin and native
    # Windows, with other native tools, or on shared volumes
    def checklink(path):
        return False

_needsshellquote = None
def shellquote(s):
    if pycompat.sysplatform == 'OpenVMS':
        return '"%s"' % s
    global _needsshellquote
    if _needsshellquote is None:
        _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
    if s and not _needsshellquote(s):
        # "s" shouldn't have to be quoted
        return s
    else:
        return "'%s'" % s.replace("'", "'\\''")

def shellsplit(s):
    """Parse a command string in POSIX shell way (best-effort)"""
    return pycompat.shlexsplit(s, posix=True)

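(Editor's aside; an illustrative sketch, not part of the changeset, using
bytes literals as this Python 2-era code treats them.) shellquote leaves
"boring" strings alone and single-quotes everything else, escaping
embedded quotes the POSIX way:

    assert shellquote(b'simple_name.txt') == b'simple_name.txt'
    assert shellquote(b'two words') == b"'two words'"
    assert shellquote(b"it's") == b"'it'\\''s'"  # ' becomes '\''
    assert shellquote(b'') == b"''"              # empty string stays quoted
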
def quotecommand(cmd):
    return cmd

def popen(command, mode='r'):
    return os.popen(command, mode)

def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if pycompat.sysplatform == 'OpenVMS':
        return True
    try:
        os.kill(pid, 0)
        return True
    except OSError as inst:
        return inst.errno != errno.ESRCH

def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code >= 0:
        return _("exited with status %d") % code, code
    return _("killed by signal %d") % -code, -code

def isowner(st):
    """Return True if the stat object st is from the current user."""
    return st.st_uid == os.getuid()

def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if pycompat.sysplatform == 'OpenVMS':
        return command

    def findexisting(executable):
        'Will return executable if it is an existing file'
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
        return None

    if pycompat.ossep in command:
        return findexisting(command)

    if pycompat.sysplatform == 'plan9':
        return findexisting(os.path.join('/bin', command))

    for path in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
        executable = findexisting(os.path.join(path, command))
        if executable is not None:
            return executable
    return None

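(Editor's aside; an illustrative sketch, not part of the changeset;
results depend on the host system.)

    findexe(b'sh')           # -> b'/bin/sh' on many systems, via PATH
    findexe(b'./setup.py')   # contains '/', so PATH is never consulted
    findexe(b'no-such-cmd')  # -> None
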
def setsignalhandler():
    pass

_wantedkinds = {stat.S_IFREG, stat.S_IFLNK}

def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file does not
    exist or has a type we don't care about.'''
    lstat = os.lstat
    getkind = stat.S_IFMT
    for nf in files:
        try:
            st = lstat(nf)
            if getkind(st.st_mode) not in _wantedkinds:
                st = None
        except OSError as err:
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            st = None
        yield st

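(Editor's aside; an illustrative sketch with hypothetical inputs, not
part of the changeset.) The generator is lazy and maps missing or
uninteresting paths to None instead of raising:

    names = [b'.hg/hgrc', b'no-such-file', b'/dev/null']
    for name, st in zip(names, statfiles(names)):
        if st is None:
            print('%s: missing, or not a regular file/symlink' % name)
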
def getuser():
    '''return name of current user'''
    return pycompat.fsencode(getpass.getuser())

def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""

    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        return str(uid)

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""

    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        return str(gid)

def groupmembers(name):
    """Return the list of members of the group with the given
    name, KeyError if the group does not exist.
    """
    return list(grp.getgrnam(name).gr_mem)

def spawndetached(args):
    return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                      args[0], args)

def gethgcmd():
    return sys.argv[:1]

def makedir(path, notindexed):
    os.mkdir(path)

def lookupreg(key, name=None, scope=None):
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass

class cachestat(object):
    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    def __eq__(self, other):
        try:
            # Only dev, ino, size, mtime and atime are likely to change. Out
            # of these, we shouldn't compare atime but should compare the
            # rest. However, one of the other fields changing indicates
            # something fishy going on, so return False if anything but atime
            # changes.
            return (self.stat.st_mode == other.stat.st_mode and
                    self.stat.st_ino == other.stat.st_ino and
                    self.stat.st_dev == other.stat.st_dev and
                    self.stat.st_nlink == other.stat.st_nlink and
                    self.stat.st_uid == other.stat.st_uid and
                    self.stat.st_gid == other.stat.st_gid and
                    self.stat.st_size == other.stat.st_size and
-                   self.stat.st_mtime == other.stat.st_mtime and
-                   self.stat.st_ctime == other.stat.st_ctime)
+                   self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] and
+                   self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other

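(Editor's aside; an illustrative sketch with hypothetical names, not part
of the changeset.) Typical use: snapshot a file's metadata, then later
decide whether data derived from it is still trustworthy:

    path = b'/path/to/.hg/hgrc'  # hypothetical path
    before = cachestat(path)
    data = parseconfig(path)     # hypothetical parser
    # ... later ...
    if cachestat(path) == before and before.cacheable():
        reuse(data)  # metadata, including integer mtime/ctime, unchanged
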
def executablepath():
    return None # available on Windows only

def statislink(st):
    '''check whether a stat result is a symlink'''
    return st and stat.S_ISLNK(st.st_mode)

def statisexec(st):
    '''check whether a stat result is an executable file'''
    return st and (st.st_mode & 0o100 != 0)

def poll(fds):
    """block until something happens on any file descriptor

    This is a generic helper that will check for any activity
    (read, write, exception) and return the list of touched files.

    In unsupported cases, it will raise a NotImplementedError"""
    try:
        while True:
            try:
                res = select.select(fds, fds, fds)
                break
            except select.error as inst:
                if inst.args[0] == errno.EINTR:
                    continue
                raise
    except ValueError: # out of range file descriptor
        raise NotImplementedError()
    return sorted(list(set(sum(res, []))))

def readpipe(pipe):
    """Read all available data from a pipe."""
    # We can't fstat() a pipe because Linux will always report 0.
    # So, we set the pipe to non-blocking mode and read everything
    # that's available.
    # Save the original flags, then switch the pipe to non-blocking mode.
    oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    try:
        chunks = []
        while True:
            try:
                s = pipe.read()
                if not s:
                    break
                chunks.append(s)
            except IOError:
                break

        return ''.join(chunks)
    finally:
        fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)

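(Editor's aside; an illustrative sketch of the same technique in
isolation, not part of the changeset.) Toggling O_NONBLOCK makes read()
return whatever is buffered instead of blocking for more data:

    import fcntl
    import os

    def drain(fileobj):
        """Read whatever is currently buffered, without blocking."""
        flags = fcntl.fcntl(fileobj, fcntl.F_GETFL)
        fcntl.fcntl(fileobj, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        try:
            try:
                return fileobj.read() or ''
            except IOError:
                return ''  # nothing available right now
        finally:
            fcntl.fcntl(fileobj, fcntl.F_SETFL, flags)  # restore flags
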
def bindunixsocket(sock, path):
    """Bind the UNIX domain socket to the specified path"""
    # use relative path instead of full path at bind() if possible, since
    # AF_UNIX path has very small length limit (107 chars) on common
    # platforms (see sys/un.h)
    dirname, basename = os.path.split(path)
    bakwdfd = None
    if dirname:
        bakwdfd = os.open('.', os.O_DIRECTORY)
        os.chdir(dirname)
    sock.bind(basename)
    if bakwdfd:
        os.fchdir(bakwdfd)
        os.close(bakwdfd)

[Five more modified files omitted -- the requested commit is too big and their content was truncated.]