##// END OF EJS Templates
extensions: change magic "shipped with hg" string...
Augie Fackler -
r29841:d5883fd0 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,516 +1,516 b''
1 # synthrepo.py - repo synthesis
1 # synthrepo.py - repo synthesis
2 #
2 #
3 # Copyright 2012 Facebook
3 # Copyright 2012 Facebook
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''synthesize structurally interesting change history
8 '''synthesize structurally interesting change history
9
9
10 This extension is useful for creating a repository with properties
10 This extension is useful for creating a repository with properties
11 that are statistically similar to an existing repository. During
11 that are statistically similar to an existing repository. During
12 analysis, a simple probability table is constructed from the history
12 analysis, a simple probability table is constructed from the history
13 of an existing repository. During synthesis, these properties are
13 of an existing repository. During synthesis, these properties are
14 reconstructed.
14 reconstructed.
15
15
16 Properties that are analyzed and synthesized include the following:
16 Properties that are analyzed and synthesized include the following:
17
17
18 - Lines added or removed when an existing file is modified
18 - Lines added or removed when an existing file is modified
19 - Number and sizes of files added
19 - Number and sizes of files added
20 - Number of files removed
20 - Number of files removed
21 - Line lengths
21 - Line lengths
22 - Topological distance to parent changeset(s)
22 - Topological distance to parent changeset(s)
23 - Probability of a commit being a merge
23 - Probability of a commit being a merge
24 - Probability of a newly added file being added to a new directory
24 - Probability of a newly added file being added to a new directory
25 - Interarrival time, and time zone, of commits
25 - Interarrival time, and time zone, of commits
26 - Number of files in each directory
26 - Number of files in each directory
27
27
28 A few obvious properties that are not currently handled realistically:
28 A few obvious properties that are not currently handled realistically:
29
29
30 - Merges are treated as regular commits with two parents, which is not
30 - Merges are treated as regular commits with two parents, which is not
31 realistic
31 realistic
32 - Modifications are not treated as operations on hunks of lines, but
32 - Modifications are not treated as operations on hunks of lines, but
33 as insertions and deletions of randomly chosen single lines
33 as insertions and deletions of randomly chosen single lines
34 - Committer ID (always random)
34 - Committer ID (always random)
35 - Executability of files
35 - Executability of files
36 - Symlinks and binary files are ignored
36 - Symlinks and binary files are ignored
37 '''
37 '''
38
38
39 from __future__ import absolute_import
39 from __future__ import absolute_import
40 import bisect
40 import bisect
41 import collections
41 import collections
42 import itertools
42 import itertools
43 import json
43 import json
44 import os
44 import os
45 import random
45 import random
46 import sys
46 import sys
47 import time
47 import time
48
48
49 from mercurial.i18n import _
49 from mercurial.i18n import _
50 from mercurial.node import (
50 from mercurial.node import (
51 nullid,
51 nullid,
52 nullrev,
52 nullrev,
53 short,
53 short,
54 )
54 )
55 from mercurial import (
55 from mercurial import (
56 cmdutil,
56 cmdutil,
57 context,
57 context,
58 error,
58 error,
59 hg,
59 hg,
60 patch,
60 patch,
61 scmutil,
61 scmutil,
62 util,
62 util,
63 )
63 )
64
64
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Command table populated by the @command decorator below; Mercurial's
# extension loader looks this module-level name up.
cmdtable = {}
command = cmdutil.command(cmdtable)

# Six-character prefixes of git-diff metadata lines that mark a file as
# newly added ("new file", "rename ...", "copy from", "copy to").
newfile = set(('new fi', 'rename', 'copy f', 'copy t'))
75
75
def zerodict():
    '''Return a counting dict whose missing keys default to 0.

    Uses the ``int`` constructor as the default factory, the idiomatic
    (and slightly cheaper) equivalent of ``lambda: 0``.
    '''
    return collections.defaultdict(int)
78
78
def roundto(x, k):
    '''Round x to the nearest integer, snapping to the nearest multiple
    of k once x exceeds twice k (coarse bucketing for the histograms).'''
    if x <= k * 2:
        # Small values keep full resolution.
        return int(round(x))
    return int(round(x / float(k)) * k)
83
83
def parsegitdiff(lines):
    '''Parse a git-style diff, one line at a time.

    Yields one (filename, mar, lineadd, lineremove, binary) tuple per
    file in the diff, where mar is 'm' (modified), 'a' (added) or
    'r' (removed), lineadd maps bucketed added-line lengths to counts,
    lineremove is the number of removed lines, and binary flags a
    GIT binary patch.
    '''
    filename = None
    mar = None
    lineadd = zerodict()
    lineremove = 0
    binary = False
    for line in lines:
        prefix = line[:6]
        if prefix == 'diff -':
            # A new file header: flush the previous file's stats first.
            if filename:
                yield filename, mar, lineadd, lineremove, binary
            mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
            filename = patch.gitre.match(line).group(1)
        elif prefix in newfile:
            mar = 'a'
        elif prefix == 'GIT bi':
            binary = True
        elif prefix == 'delete':
            mar = 'r'
        elif prefix:
            marker = prefix[0]
            # Skip the '+++ '/'--- ' file-name header lines; count only
            # real hunk content. Added-line lengths are bucketed to the
            # nearest multiple of 5 (minus the leading '+').
            if marker == '+' and not line.startswith('+++ '):
                lineadd[roundto(len(line) - 1, 5)] += 1
            elif marker == '-' and not line.startswith('--- '):
                lineremove += 1
    # Flush the final file, if any diff was seen at all.
    if filename:
        yield filename, mar, lineadd, lineremove, binary
108
108
@command('analyze',
         [('o', 'output', '', _('write output to given file'), _('FILE')),
          ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
         _('hg analyze'), optionalrepo=True)
def analyze(ui, repo, *revs, **opts):
    '''create a simple model of a repository to use for later synthesis

    This command examines every changeset in the given range (or all
    of history if none are specified) and creates a simple statistical
    model of the history of the repository. It also measures the directory
    structure of the repository as checked out.

    The model is written out to a JSON file, and can be used by
    :hg:`synthesize` to create or augment a repository with synthetic
    commits that have a structure that is statistically similar to the
    analyzed repository.
    '''
    root = repo.root
    if not root.endswith(os.path.sep):
        root += os.path.sep

    # Revisions may come as positional args or repeated -r options;
    # default to ':' (the full revset range) when none were given.
    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = [':']

    output = opts['output']
    if not output:
        output = os.path.basename(root) + '.json'

    if output == '-':
        fp = sys.stdout
    else:
        fp = open(output, 'w')
    # NOTE(review): fp.close() at the end of this function will close
    # sys.stdout when output is '-' -- confirm that is intended.

    # Always obtain file counts of each directory in the given root directory.
    def onerror(e):
        # Report walk errors but keep going.
        ui.warn(_('error walking directory structure: %s\n') % e)

    dirs = {}
    rootprefixlen = len(root)
    for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
        dirpathfromroot = dirpath[rootprefixlen:]
        dirs[dirpathfromroot] = len(filenames)
        # Prune the .hg metadata directory so the walk never descends
        # into it.
        if '.hg' in dirnames:
            dirnames.remove('.hg')

    # Frequency tables (value -> occurrence count) for each modeled
    # property; these are serialized into the JSON model below.
    lineschanged = zerodict()
    children = zerodict()
    p1distance = zerodict()
    p2distance = zerodict()
    linesinfilesadded = zerodict()
    fileschanged = zerodict()
    filesadded = zerodict()
    filesremoved = zerodict()
    linelengths = zerodict()
    interarrival = zerodict()
    parents = zerodict()
    dirsadded = zerodict()
    tzoffset = zerodict()

    # If a mercurial repo is available, also model the commit history.
    if repo:
        # 'revs' is rebound from revset strings to a sorted list of
        # revision numbers here.
        revs = scmutil.revrange(repo, revs)
        revs.sort()

        # Hoist lookups used on every iteration of the hot loop.
        progress = ui.progress
        _analyzing = _('analyzing')
        _changesets = _('changesets')
        _total = len(revs)

        for i, rev in enumerate(revs):
            progress(_analyzing, i, unit=_changesets, total=_total)
            ctx = repo[rev]
            pl = ctx.parents()
            pctx = pl[0]
            prev = pctx.rev()
            children[prev] += 1
            # Topological distance to each parent, and merge frequency.
            p1distance[rev - prev] += 1
            parents[len(pl)] += 1
            tzoffset[ctx.date()[1]] += 1
            if len(pl) > 1:
                p2distance[rev - pl[1].rev()] += 1
            # Interarrival time is measured against the previous revision
            # in numeric order, reusing pctx when it happens to be rev-1.
            if prev == rev - 1:
                lastctx = pctx
            else:
                lastctx = repo[rev - 1]
            if lastctx.rev() != nullrev:
                timedelta = ctx.date()[0] - lastctx.date()[0]
                interarrival[roundto(timedelta, 300)] += 1
            # Flatten the git diff against p1 into lines and tally
            # per-file add/remove/modify statistics.
            diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
            fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
            for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                if isbin:
                    # Binary files are ignored by the model (see module doc).
                    continue
                added = sum(lineadd.itervalues(), 0)
                if mar == 'm':
                    # Only modifications that both add and remove lines
                    # are modeled.
                    if added and lineremove:
                        lineschanged[roundto(added, 5),
                                     roundto(lineremove, 5)] += 1
                        filechanges += 1
                elif mar == 'a':
                    fileadds += 1
                    if '/' in filename:
                        filedir = filename.rsplit('/', 1)[0]
                        # A file added to a directory the parent did not
                        # have counts as a new-directory add.
                        if filedir not in pctx.dirs():
                            diradds += 1
                    linesinfilesadded[roundto(added, 5)] += 1
                elif mar == 'r':
                    fileremoves += 1
                for length, count in lineadd.iteritems():
                    linelengths[length] += count
            fileschanged[filechanges] += 1
            filesadded[fileadds] += 1
            dirsadded[diradds] += 1
            filesremoved[fileremoves] += 1

    # Invert children: map "number of children" -> "number of revisions
    # having that many children".
    invchildren = zerodict()

    for rev, count in children.iteritems():
        invchildren[count] += 1

    if output != '-':
        ui.status(_('writing output to %s\n') % output)

    def pronk(d):
        # Serialize a frequency dict as (value, count) pairs, most
        # frequent first.
        return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)

    json.dump({'revs': len(revs),
               'initdirs': pronk(dirs),
               'lineschanged': pronk(lineschanged),
               'children': pronk(invchildren),
               'fileschanged': pronk(fileschanged),
               'filesadded': pronk(filesadded),
               'linesinfilesadded': pronk(linesinfilesadded),
               'dirsadded': pronk(dirsadded),
               'filesremoved': pronk(filesremoved),
               'linelengths': pronk(linelengths),
               'parents': pronk(parents),
               'p1distance': pronk(p1distance),
               'p2distance': pronk(p2distance),
               'interarrival': pronk(interarrival),
               'tzoffset': pronk(tzoffset),
               },
              fp)
    fp.close()
255
255
@command('synthesize',
         [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
          ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
          ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
         _('hg synthesize [OPTION].. DESCFILE'))
def synthesize(ui, repo, descpath, **opts):
    '''synthesize commits based on a model of an existing repository

    The model must have been generated by :hg:`analyze`. Commits will
    be generated randomly according to the probabilities described in
    the model. If --initfiles is set, the repository will be seeded with
    the given number files following the modeled repository's directory
    structure.

    When synthesizing new content, commit descriptions, and user
    names, words will be chosen randomly from a dictionary that is
    presumed to contain one word per line. Use --dict to specify the
    path to an alternate dictionary to use.
    '''
    try:
        fp = hg.openpath(ui, descpath)
    except Exception as err:
        # NOTE(review): err[0].strerror assumes an IOError-style args
        # tuple; a different exception type would raise here instead of
        # producing the intended message -- confirm.
        raise error.Abort('%s: %s' % (descpath, err[0].strerror))
    desc = json.load(fp)
    fp.close()

    def cdf(l):
        # Turn a list of (value, count) pairs into a (values, cumulative
        # probabilities) pair for inverse-transform sampling via pick().
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for v in probs:
            s += v
            cdfs.append(s / t)
        return vals, cdfs

    # Precompute a sampling distribution for every modeled property.
    lineschanged = cdf(desc['lineschanged'])
    fileschanged = cdf(desc['fileschanged'])
    filesadded = cdf(desc['filesadded'])
    dirsadded = cdf(desc['dirsadded'])
    filesremoved = cdf(desc['filesremoved'])
    linelengths = cdf(desc['linelengths'])
    parents = cdf(desc['parents'])
    p1distance = cdf(desc['p1distance'])
    p2distance = cdf(desc['p2distance'])
    interarrival = cdf(desc['interarrival'])
    linesinfilesadded = cdf(desc['linesinfilesadded'])
    tzoffset = cdf(desc['tzoffset'])

    dictfile = opts.get('dict') or '/usr/share/dict/words'
    try:
        fp = open(dictfile, 'rU')
    except IOError as err:
        raise error.Abort('%s: %s' % (dictfile, err.strerror))
    words = fp.read().splitlines()
    fp.close()

    initdirs = {}
    if desc['initdirs']:
        for k, v in desc['initdirs']:
            # '.hg' path components are rewritten so synthesized paths
            # can never collide with the repository metadata directory.
            initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
        initdirs = renamedirs(initdirs, words)
    initdirscdf = cdf(initdirs)

    def pick(cdf):
        # Sample one value from a (values, cumulative-probs) pair.
        return cdf[0][bisect.bisect_left(cdf[1], random.random())]

    def pickpath():
        return os.path.join(pick(initdirscdf), random.choice(words))

    def makeline(minimum=0):
        # Build a random line of words at least 'total' characters long,
        # where the target length is drawn from the modeled distribution.
        total = max(minimum, pick(linelengths))
        c, l = 0, []
        while c < total:
            w = random.choice(words)
            c += len(w) + 1
            l.append(w)
        return ' '.join(l)

    wlock = repo.wlock()
    lock = repo.lock()

    nevertouch = set(('.hgsub', '.hgignore', '.hgtags'))

    # Hoist frequently used names out of the synthesis loops.
    progress = ui.progress
    _synthesizing = _('synthesizing')
    _files = _('initial files')
    _changesets = _('changesets')

    # Synthesize a single initial revision adding files to the repo according
    # to the modeled directory structure.
    initcount = int(opts['initfiles'])
    if initcount and initdirs:
        pctx = repo[None].parents()[0]
        dirs = set(pctx.dirs())
        files = {}

        def validpath(path):
            # Don't pick filenames which are already directory names.
            if path in dirs:
                return False
            # Don't pick directories which were used as file names.
            while path:
                if path in files:
                    return False
                path = os.path.dirname(path)
            return True

        for i in xrange(0, initcount):
            ui.progress(_synthesizing, i, unit=_files, total=initcount)

            # Retry until a path that conflicts with neither existing
            # directories nor already-chosen files is found.
            path = pickpath()
            while not validpath(path):
                path = pickpath()
            data = '%s contents\n' % path
            files[path] = context.memfilectx(repo, path, data)
            # Record every new ancestor directory of the chosen path.
            dir = os.path.dirname(path)
            while dir and dir not in dirs:
                dirs.add(dir)
                dir = os.path.dirname(dir)

        def filectxfn(repo, memctx, path):
            return files[path]

        ui.progress(_synthesizing, None)
        message = 'synthesized wide repo with %d files' % (len(files),)
        mc = context.memctx(repo, [pctx.node(), nullid], message,
                            files.iterkeys(), filectxfn, ui.username(),
                            '%d %d' % util.makedate())
        initnode = mc.commit()
        if ui.debugflag:
            hexfn = hex
        else:
            hexfn = short
        ui.status(_('added commit %s with %d files\n')
                  % (hexfn(initnode), len(files)))

    # Synthesize incremental revisions to the repository, adding repo depth.
    count = int(opts['count'])
    heads = set(map(repo.changelog.rev, repo.heads()))
    for i in xrange(count):
        progress(_synthesizing, i, unit=_changesets, total=count)

        node = repo.changelog.node
        revs = len(repo)

        def pickhead(heads, distance):
            # Pick an existing head at a modeled distance back from tip;
            # 'heads' here is the parameter, deliberately shadowing the
            # outer set so the caller can pass a filtered copy.
            if heads:
                lheads = sorted(heads)
                rev = revs - min(pick(distance), revs)
                if rev < lheads[-1]:
                    rev = lheads[bisect.bisect_left(lheads, rev)]
                else:
                    rev = lheads[-1]
                return rev, node(rev)
            return nullrev, nullid

        r1 = revs - min(pick(p1distance), revs)
        p1 = node(r1)

        # the number of heads will grow without bound if we use a pure
        # model, so artificially constrain their proliferation
        toomanyheads = len(heads) > random.randint(1, 20)
        if p2distance[0] and (pick(parents) == 2 or toomanyheads):
            r2, p2 = pickhead(heads.difference([r1]), p2distance)
        else:
            r2, p2 = nullrev, nullid

        pl = [p1, p2]
        pctx = repo[r1]
        mf = pctx.manifest()
        mfk = mf.keys()
        changes = {}
        if mfk:
            # Modify a modeled number of existing files.
            for __ in xrange(pick(fileschanged)):
                # Up to 10 attempts to find a file that is safe to touch
                # (not special, binary, or a symlink).
                for __ in xrange(10):
                    fctx = pctx.filectx(random.choice(mfk))
                    path = fctx.path()
                    if not (path in nevertouch or fctx.isbinary() or
                            'l' in fctx.flags()):
                        break
                lines = fctx.data().splitlines()
                add, remove = pick(lineschanged)
                for __ in xrange(remove):
                    if not lines:
                        break
                    del lines[random.randrange(0, len(lines))]
                for __ in xrange(add):
                    lines.insert(random.randint(0, len(lines)), makeline())
                path = fctx.path()
                changes[path] = context.memfilectx(repo, path,
                                                   '\n'.join(lines) + '\n')
            # Remove a modeled number of files (None marks removal).
            for __ in xrange(pick(filesremoved)):
                path = random.choice(mfk)
                for __ in xrange(10):
                    path = random.choice(mfk)
                    if path not in changes:
                        changes[path] = None
                        break
        if filesadded:
            dirs = list(pctx.dirs())
            dirs.insert(0, '')
        # Add a modeled number of new files, possibly in new directories.
        for __ in xrange(pick(filesadded)):
            pathstr = ''
            while pathstr in dirs:
                path = [random.choice(dirs)]
                if pick(dirsadded):
                    path.append(random.choice(words))
                path.append(random.choice(words))
                pathstr = '/'.join(filter(None, path))
            data = '\n'.join(makeline()
                             for __ in xrange(pick(linesinfilesadded))) + '\n'
            changes[pathstr] = context.memfilectx(repo, pathstr, data)
        def filectxfn(repo, memctx, path):
            return changes[path]
        if not changes:
            continue
        if revs:
            date = repo['tip'].date()[0] + pick(interarrival)
        else:
            date = time.time() - (86400 * count)
        # dates in mercurial must be positive, fit in 32-bit signed integers.
        date = min(0x7fffffff, max(0, date))
        user = random.choice(words) + '@' + random.choice(words)
        mc = context.memctx(repo, pl, makeline(minimum=2),
                            sorted(changes.iterkeys()),
                            filectxfn, user, '%d %d' % (date, pick(tzoffset)))
        newnode = mc.commit()
        # The new commit becomes a head; its parents stop being heads.
        heads.add(repo.changelog.rev(newnode))
        heads.discard(r1)
        heads.discard(r2)

    lock.release()
    wlock.release()
491
491
def renamedirs(dirs, words):
    '''Randomly rename the directory names in the per-dir file count dict.

    dirs maps directory path -> file count; words supplies replacement
    names (cycled in order). Returns a list of [renamed-path, count]
    pairs in which shared path prefixes are renamed consistently.
    '''
    wordgen = itertools.cycle(words)
    # Memoized original-path -> renamed-path mapping; the empty path maps
    # to itself so the recursion below terminates at the root.
    replacements = {'': ''}
    def rename(dirpath):
        '''Recursively rename the directory and all path prefixes.

        The mapping from path to renamed path is stored for all path prefixes
        as in dynamic programming, ensuring linear runtime and consistent
        renaming regardless of iteration order through the model.
        '''
        if dirpath in replacements:
            return replacements[dirpath]
        # Take only the head; avoid unpacking into '_', which would
        # shadow the module's i18n helper of the same name.
        head = os.path.split(dirpath)[0]
        if head:
            head = rename(head)
        else:
            head = ''
        renamed = os.path.join(head, next(wordgen))
        replacements[dirpath] = renamed
        return renamed
    result = []
    # items() rather than iteritems(): identical behavior on Python 2 and
    # also works under Python 3.
    for dirpath, count in dirs.items():
        result.append([rename(dirpath.lstrip(os.sep)), count])
    return result
@@ -1,330 +1,330 b''
1 # acl.py - changeset access control for mercurial
1 # acl.py - changeset access control for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''hooks for controlling repository access
8 '''hooks for controlling repository access
9
9
10 This hook makes it possible to allow or deny write access to given
10 This hook makes it possible to allow or deny write access to given
11 branches and paths of a repository when receiving incoming changesets
11 branches and paths of a repository when receiving incoming changesets
12 via pretxnchangegroup and pretxncommit.
12 via pretxnchangegroup and pretxncommit.
13
13
14 The authorization is matched based on the local user name on the
14 The authorization is matched based on the local user name on the
15 system where the hook runs, and not the committer of the original
15 system where the hook runs, and not the committer of the original
16 changeset (since the latter is merely informative).
16 changeset (since the latter is merely informative).
17
17
18 The acl hook is best used along with a restricted shell like hgsh,
18 The acl hook is best used along with a restricted shell like hgsh,
19 preventing authenticating users from doing anything other than pushing
19 preventing authenticating users from doing anything other than pushing
20 or pulling. The hook is not safe to use if users have interactive
20 or pulling. The hook is not safe to use if users have interactive
21 shell access, as they can then disable the hook. Nor is it safe if
21 shell access, as they can then disable the hook. Nor is it safe if
22 remote users share an account, because then there is no way to
22 remote users share an account, because then there is no way to
23 distinguish them.
23 distinguish them.
24
24
25 The order in which access checks are performed is:
25 The order in which access checks are performed is:
26
26
27 1) Deny list for branches (section ``acl.deny.branches``)
27 1) Deny list for branches (section ``acl.deny.branches``)
28 2) Allow list for branches (section ``acl.allow.branches``)
28 2) Allow list for branches (section ``acl.allow.branches``)
29 3) Deny list for paths (section ``acl.deny``)
29 3) Deny list for paths (section ``acl.deny``)
30 4) Allow list for paths (section ``acl.allow``)
30 4) Allow list for paths (section ``acl.allow``)
31
31
32 The allow and deny sections take key-value pairs.
32 The allow and deny sections take key-value pairs.
33
33
34 Branch-based Access Control
34 Branch-based Access Control
35 ---------------------------
35 ---------------------------
36
36
37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
38 have branch-based access control. Keys in these sections can be
38 have branch-based access control. Keys in these sections can be
39 either:
39 either:
40
40
41 - a branch name, or
41 - a branch name, or
42 - an asterisk, to match any branch;
42 - an asterisk, to match any branch;
43
43
44 The corresponding values can be either:
44 The corresponding values can be either:
45
45
46 - a comma-separated list containing users and groups, or
46 - a comma-separated list containing users and groups, or
47 - an asterisk, to match anyone;
47 - an asterisk, to match anyone;
48
48
49 You can add the "!" prefix to a user or group name to invert the sense
49 You can add the "!" prefix to a user or group name to invert the sense
50 of the match.
50 of the match.
51
51
52 Path-based Access Control
52 Path-based Access Control
53 -------------------------
53 -------------------------
54
54
55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
56 access control. Keys in these sections accept a subtree pattern (with
56 access control. Keys in these sections accept a subtree pattern (with
57 a glob syntax by default). The corresponding values follow the same
57 a glob syntax by default). The corresponding values follow the same
58 syntax as the other sections above.
58 syntax as the other sections above.
59
59
60 Groups
60 Groups
61 ------
61 ------
62
62
63 Group names must be prefixed with an ``@`` symbol. Specifying a group
63 Group names must be prefixed with an ``@`` symbol. Specifying a group
64 name has the same effect as specifying all the users in that group.
64 name has the same effect as specifying all the users in that group.
65
65
66 You can define group members in the ``acl.groups`` section.
66 You can define group members in the ``acl.groups`` section.
67 If a group name is not defined there, and Mercurial is running under
67 If a group name is not defined there, and Mercurial is running under
68 a Unix-like system, the list of users will be taken from the OS.
68 a Unix-like system, the list of users will be taken from the OS.
69 Otherwise, an exception will be raised.
69 Otherwise, an exception will be raised.
70
70
71 Example Configuration
71 Example Configuration
72 ---------------------
72 ---------------------
73
73
74 ::
74 ::
75
75
76 [hooks]
76 [hooks]
77
77
78 # Use this if you want to check access restrictions at commit time
78 # Use this if you want to check access restrictions at commit time
79 pretxncommit.acl = python:hgext.acl.hook
79 pretxncommit.acl = python:hgext.acl.hook
80
80
81 # Use this if you want to check access restrictions for pull, push,
81 # Use this if you want to check access restrictions for pull, push,
82 # bundle and serve.
82 # bundle and serve.
83 pretxnchangegroup.acl = python:hgext.acl.hook
83 pretxnchangegroup.acl = python:hgext.acl.hook
84
84
85 [acl]
85 [acl]
86 # Allow or deny access for incoming changes only if their source is
86 # Allow or deny access for incoming changes only if their source is
87 # listed here, let them pass otherwise. Source is "serve" for all
87 # listed here, let them pass otherwise. Source is "serve" for all
88 # remote access (http or ssh), "push", "pull" or "bundle" when the
88 # remote access (http or ssh), "push", "pull" or "bundle" when the
89 # related commands are run locally.
89 # related commands are run locally.
90 # Default: serve
90 # Default: serve
91 sources = serve
91 sources = serve
92
92
93 [acl.deny.branches]
93 [acl.deny.branches]
94
94
95 # Everyone is denied to the frozen branch:
95 # Everyone is denied to the frozen branch:
96 frozen-branch = *
96 frozen-branch = *
97
97
98 # A bad user is denied on all branches:
98 # A bad user is denied on all branches:
99 * = bad-user
99 * = bad-user
100
100
101 [acl.allow.branches]
101 [acl.allow.branches]
102
102
103 # A few users are allowed on branch-a:
103 # A few users are allowed on branch-a:
104 branch-a = user-1, user-2, user-3
104 branch-a = user-1, user-2, user-3
105
105
106 # Only one user is allowed on branch-b:
106 # Only one user is allowed on branch-b:
107 branch-b = user-1
107 branch-b = user-1
108
108
109 # The super user is allowed on any branch:
109 # The super user is allowed on any branch:
110 * = super-user
110 * = super-user
111
111
112 # Everyone is allowed on branch-for-tests:
112 # Everyone is allowed on branch-for-tests:
113 branch-for-tests = *
113 branch-for-tests = *
114
114
115 [acl.deny]
115 [acl.deny]
116 # This list is checked first. If a match is found, acl.allow is not
116 # This list is checked first. If a match is found, acl.allow is not
117 # checked. All users are granted access if acl.deny is not present.
117 # checked. All users are granted access if acl.deny is not present.
118 # Format for both lists: glob pattern = user, ..., @group, ...
118 # Format for both lists: glob pattern = user, ..., @group, ...
119
119
120 # To match everyone, use an asterisk for the user:
120 # To match everyone, use an asterisk for the user:
121 # my/glob/pattern = *
121 # my/glob/pattern = *
122
122
123 # user6 will not have write access to any file:
123 # user6 will not have write access to any file:
124 ** = user6
124 ** = user6
125
125
126 # Group "hg-denied" will not have write access to any file:
126 # Group "hg-denied" will not have write access to any file:
127 ** = @hg-denied
127 ** = @hg-denied
128
128
129 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
129 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
130 # everyone being able to change all other files. See below.
130 # everyone being able to change all other files. See below.
131 src/main/resources/DONT-TOUCH-THIS.txt = *
131 src/main/resources/DONT-TOUCH-THIS.txt = *
132
132
133 [acl.allow]
133 [acl.allow]
134 # if acl.allow is not present, all users are allowed by default
134 # if acl.allow is not present, all users are allowed by default
135 # empty acl.allow = no users allowed
135 # empty acl.allow = no users allowed
136
136
137 # User "doc_writer" has write access to any file under the "docs"
137 # User "doc_writer" has write access to any file under the "docs"
138 # folder:
138 # folder:
139 docs/** = doc_writer
139 docs/** = doc_writer
140
140
141 # User "jack" and group "designers" have write access to any file
141 # User "jack" and group "designers" have write access to any file
142 # under the "images" folder:
142 # under the "images" folder:
143 images/** = jack, @designers
143 images/** = jack, @designers
144
144
145 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
145 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
146 # will have write access to any file under the "resources" folder
146 # will have write access to any file under the "resources" folder
147 # (except for 1 file. See acl.deny):
147 # (except for 1 file. See acl.deny):
148 src/main/resources/** = *
148 src/main/resources/** = *
149
149
150 .hgtags = release_engineer
150 .hgtags = release_engineer
151
151
152 Examples using the "!" prefix
152 Examples using the "!" prefix
153 .............................
153 .............................
154
154
155 Suppose there's a branch that only a given user (or group) should be able to
155 Suppose there's a branch that only a given user (or group) should be able to
156 push to, and you don't want to restrict access to any other branch that may
156 push to, and you don't want to restrict access to any other branch that may
157 be created.
157 be created.
158
158
159 The "!" prefix allows you to prevent anyone except a given user or group to
159 The "!" prefix allows you to prevent anyone except a given user or group to
160 push changesets in a given branch or path.
160 push changesets in a given branch or path.
161
161
162 In the examples below, we will:
162 In the examples below, we will:
163 1) Deny access to branch "ring" to anyone but user "gollum"
163 1) Deny access to branch "ring" to anyone but user "gollum"
164 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
164 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
165 3) Deny access to a file to anyone but user "gollum"
165 3) Deny access to a file to anyone but user "gollum"
166
166
167 ::
167 ::
168
168
169 [acl.allow.branches]
169 [acl.allow.branches]
170 # Empty
170 # Empty
171
171
172 [acl.deny.branches]
172 [acl.deny.branches]
173
173
174 # 1) only 'gollum' can commit to branch 'ring';
174 # 1) only 'gollum' can commit to branch 'ring';
175 # 'gollum' and anyone else can still commit to any other branch.
175 # 'gollum' and anyone else can still commit to any other branch.
176 ring = !gollum
176 ring = !gollum
177
177
178 # 2) only members of the group 'hobbit' can commit to branch 'lake';
178 # 2) only members of the group 'hobbit' can commit to branch 'lake';
179 # 'hobbit' members and anyone else can still commit to any other branch.
179 # 'hobbit' members and anyone else can still commit to any other branch.
180 lake = !@hobbit
180 lake = !@hobbit
181
181
182 # You can also deny access based on file paths:
182 # You can also deny access based on file paths:
183
183
184 [acl.allow]
184 [acl.allow]
185 # Empty
185 # Empty
186
186
187 [acl.deny]
187 [acl.deny]
188 # 3) only 'gollum' can change the file below;
188 # 3) only 'gollum' can change the file below;
189 # 'gollum' and anyone else can still change any other file.
189 # 'gollum' and anyone else can still change any other file.
190 /misty/mountains/cave/ring = !gollum
190 /misty/mountains/cave/ring = !gollum
191
191
192 '''
192 '''
193
193
194 from __future__ import absolute_import
194 from __future__ import absolute_import
195
195
196 import getpass
196 import getpass
197
197
198 from mercurial.i18n import _
198 from mercurial.i18n import _
199 from mercurial import (
199 from mercurial import (
200 error,
200 error,
201 match,
201 match,
202 util,
202 util,
203 )
203 )
204
204
205 urlreq = util.urlreq
205 urlreq = util.urlreq
206
206
207 # Note for extension authors: ONLY specify testedwith = 'internal' for
207 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
208 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
208 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
209 # be specifying the version(s) of Mercurial they are tested with, or
209 # be specifying the version(s) of Mercurial they are tested with, or
210 # leave the attribute unspecified.
210 # leave the attribute unspecified.
211 testedwith = 'internal'
211 testedwith = 'ships-with-hg-core'
212
212
213 def _getusers(ui, group):
213 def _getusers(ui, group):
214
214
215 # First, try to use group definition from section [acl.groups]
215 # First, try to use group definition from section [acl.groups]
216 hgrcusers = ui.configlist('acl.groups', group)
216 hgrcusers = ui.configlist('acl.groups', group)
217 if hgrcusers:
217 if hgrcusers:
218 return hgrcusers
218 return hgrcusers
219
219
220 ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
220 ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
221 # If no users found in group definition, get users from OS-level group
221 # If no users found in group definition, get users from OS-level group
222 try:
222 try:
223 return util.groupmembers(group)
223 return util.groupmembers(group)
224 except KeyError:
224 except KeyError:
225 raise error.Abort(_("group '%s' is undefined") % group)
225 raise error.Abort(_("group '%s' is undefined") % group)
226
226
227 def _usermatch(ui, user, usersorgroups):
227 def _usermatch(ui, user, usersorgroups):
228
228
229 if usersorgroups == '*':
229 if usersorgroups == '*':
230 return True
230 return True
231
231
232 for ug in usersorgroups.replace(',', ' ').split():
232 for ug in usersorgroups.replace(',', ' ').split():
233
233
234 if ug.startswith('!'):
234 if ug.startswith('!'):
235 # Test for excluded user or group. Format:
235 # Test for excluded user or group. Format:
236 # if ug is a user name: !username
236 # if ug is a user name: !username
237 # if ug is a group name: !@groupname
237 # if ug is a group name: !@groupname
238 ug = ug[1:]
238 ug = ug[1:]
239 if not ug.startswith('@') and user != ug \
239 if not ug.startswith('@') and user != ug \
240 or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
240 or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
241 return True
241 return True
242
242
243 # Test for user or group. Format:
243 # Test for user or group. Format:
244 # if ug is a user name: username
244 # if ug is a user name: username
245 # if ug is a group name: @groupname
245 # if ug is a group name: @groupname
246 elif user == ug \
246 elif user == ug \
247 or ug.startswith('@') and user in _getusers(ui, ug[1:]):
247 or ug.startswith('@') and user in _getusers(ui, ug[1:]):
248 return True
248 return True
249
249
250 return False
250 return False
251
251
252 def buildmatch(ui, repo, user, key):
252 def buildmatch(ui, repo, user, key):
253 '''return tuple of (match function, list enabled).'''
253 '''return tuple of (match function, list enabled).'''
254 if not ui.has_section(key):
254 if not ui.has_section(key):
255 ui.debug('acl: %s not enabled\n' % key)
255 ui.debug('acl: %s not enabled\n' % key)
256 return None
256 return None
257
257
258 pats = [pat for pat, users in ui.configitems(key)
258 pats = [pat for pat, users in ui.configitems(key)
259 if _usermatch(ui, user, users)]
259 if _usermatch(ui, user, users)]
260 ui.debug('acl: %s enabled, %d entries for user %s\n' %
260 ui.debug('acl: %s enabled, %d entries for user %s\n' %
261 (key, len(pats), user))
261 (key, len(pats), user))
262
262
263 # Branch-based ACL
263 # Branch-based ACL
264 if not repo:
264 if not repo:
265 if pats:
265 if pats:
266 # If there's an asterisk (meaning "any branch"), always return True;
266 # If there's an asterisk (meaning "any branch"), always return True;
267 # Otherwise, test if b is in pats
267 # Otherwise, test if b is in pats
268 if '*' in pats:
268 if '*' in pats:
269 return util.always
269 return util.always
270 return lambda b: b in pats
270 return lambda b: b in pats
271 return util.never
271 return util.never
272
272
273 # Path-based ACL
273 # Path-based ACL
274 if pats:
274 if pats:
275 return match.match(repo.root, '', pats)
275 return match.match(repo.root, '', pats)
276 return util.never
276 return util.never
277
277
278 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
278 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
279 if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
279 if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
280 raise error.Abort(_('config error - hook type "%s" cannot stop '
280 raise error.Abort(_('config error - hook type "%s" cannot stop '
281 'incoming changesets nor commits') % hooktype)
281 'incoming changesets nor commits') % hooktype)
282 if (hooktype == 'pretxnchangegroup' and
282 if (hooktype == 'pretxnchangegroup' and
283 source not in ui.config('acl', 'sources', 'serve').split()):
283 source not in ui.config('acl', 'sources', 'serve').split()):
284 ui.debug('acl: changes have source "%s" - skipping\n' % source)
284 ui.debug('acl: changes have source "%s" - skipping\n' % source)
285 return
285 return
286
286
287 user = None
287 user = None
288 if source == 'serve' and 'url' in kwargs:
288 if source == 'serve' and 'url' in kwargs:
289 url = kwargs['url'].split(':')
289 url = kwargs['url'].split(':')
290 if url[0] == 'remote' and url[1].startswith('http'):
290 if url[0] == 'remote' and url[1].startswith('http'):
291 user = urlreq.unquote(url[3])
291 user = urlreq.unquote(url[3])
292
292
293 if user is None:
293 if user is None:
294 user = getpass.getuser()
294 user = getpass.getuser()
295
295
296 ui.debug('acl: checking access for user "%s"\n' % user)
296 ui.debug('acl: checking access for user "%s"\n' % user)
297
297
298 # deprecated config: acl.config
298 # deprecated config: acl.config
299 cfg = ui.config('acl', 'config')
299 cfg = ui.config('acl', 'config')
300 if cfg:
300 if cfg:
301 ui.readconfig(cfg, sections=['acl.groups', 'acl.allow.branches',
301 ui.readconfig(cfg, sections=['acl.groups', 'acl.allow.branches',
302 'acl.deny.branches', 'acl.allow', 'acl.deny'])
302 'acl.deny.branches', 'acl.allow', 'acl.deny'])
303
303
304 allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
304 allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
305 denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
305 denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
306 allow = buildmatch(ui, repo, user, 'acl.allow')
306 allow = buildmatch(ui, repo, user, 'acl.allow')
307 deny = buildmatch(ui, repo, user, 'acl.deny')
307 deny = buildmatch(ui, repo, user, 'acl.deny')
308
308
309 for rev in xrange(repo[node], len(repo)):
309 for rev in xrange(repo[node], len(repo)):
310 ctx = repo[rev]
310 ctx = repo[rev]
311 branch = ctx.branch()
311 branch = ctx.branch()
312 if denybranches and denybranches(branch):
312 if denybranches and denybranches(branch):
313 raise error.Abort(_('acl: user "%s" denied on branch "%s"'
313 raise error.Abort(_('acl: user "%s" denied on branch "%s"'
314 ' (changeset "%s")')
314 ' (changeset "%s")')
315 % (user, branch, ctx))
315 % (user, branch, ctx))
316 if allowbranches and not allowbranches(branch):
316 if allowbranches and not allowbranches(branch):
317 raise error.Abort(_('acl: user "%s" not allowed on branch "%s"'
317 raise error.Abort(_('acl: user "%s" not allowed on branch "%s"'
318 ' (changeset "%s")')
318 ' (changeset "%s")')
319 % (user, branch, ctx))
319 % (user, branch, ctx))
320 ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
320 ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
321 % (ctx, branch))
321 % (ctx, branch))
322
322
323 for f in ctx.files():
323 for f in ctx.files():
324 if deny and deny(f):
324 if deny and deny(f):
325 raise error.Abort(_('acl: user "%s" denied on "%s"'
325 raise error.Abort(_('acl: user "%s" denied on "%s"'
326 ' (changeset "%s")') % (user, f, ctx))
326 ' (changeset "%s")') % (user, f, ctx))
327 if allow and not allow(f):
327 if allow and not allow(f):
328 raise error.Abort(_('acl: user "%s" not allowed on "%s"'
328 raise error.Abort(_('acl: user "%s" not allowed on "%s"'
329 ' (changeset "%s")') % (user, f, ctx))
329 ' (changeset "%s")') % (user, f, ctx))
330 ui.debug('acl: path access granted: "%s"\n' % ctx)
330 ui.debug('acl: path access granted: "%s"\n' % ctx)
@@ -1,250 +1,250 b''
1 # blackbox.py - log repository events to a file for post-mortem debugging
1 # blackbox.py - log repository events to a file for post-mortem debugging
2 #
2 #
3 # Copyright 2010 Nicolas Dumazet
3 # Copyright 2010 Nicolas Dumazet
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """log repository events to a blackbox for debugging
9 """log repository events to a blackbox for debugging
10
10
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
12 The events that get logged can be configured via the blackbox.track config key.
12 The events that get logged can be configured via the blackbox.track config key.
13
13
14 Examples::
14 Examples::
15
15
16 [blackbox]
16 [blackbox]
17 track = *
17 track = *
18 # dirty is *EXPENSIVE* (slow);
18 # dirty is *EXPENSIVE* (slow);
19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
20 dirty = True
20 dirty = True
21 # record the source of log messages
21 # record the source of log messages
22 logsource = True
22 logsource = True
23
23
24 [blackbox]
24 [blackbox]
25 track = command, commandfinish, commandexception, exthook, pythonhook
25 track = command, commandfinish, commandexception, exthook, pythonhook
26
26
27 [blackbox]
27 [blackbox]
28 track = incoming
28 track = incoming
29
29
30 [blackbox]
30 [blackbox]
31 # limit the size of a log file
31 # limit the size of a log file
32 maxsize = 1.5 MB
32 maxsize = 1.5 MB
33 # rotate up to N log files when the current one gets too big
33 # rotate up to N log files when the current one gets too big
34 maxfiles = 3
34 maxfiles = 3
35
35
36 """
36 """
37
37
38 from __future__ import absolute_import
38 from __future__ import absolute_import
39
39
40 import errno
40 import errno
41 import re
41 import re
42
42
43 from mercurial.i18n import _
43 from mercurial.i18n import _
44 from mercurial.node import hex
44 from mercurial.node import hex
45
45
46 from mercurial import (
46 from mercurial import (
47 cmdutil,
47 cmdutil,
48 ui as uimod,
48 ui as uimod,
49 util,
49 util,
50 )
50 )
51
51
52 cmdtable = {}
52 cmdtable = {}
53 command = cmdutil.command(cmdtable)
53 command = cmdutil.command(cmdtable)
54 # Note for extension authors: ONLY specify testedwith = 'internal' for
54 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
55 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
55 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
56 # be specifying the version(s) of Mercurial they are tested with, or
56 # be specifying the version(s) of Mercurial they are tested with, or
57 # leave the attribute unspecified.
57 # leave the attribute unspecified.
58 testedwith = 'internal'
58 testedwith = 'ships-with-hg-core'
59 lastui = None
59 lastui = None
60
60
61 filehandles = {}
61 filehandles = {}
62
62
63 def _openlog(vfs):
63 def _openlog(vfs):
64 path = vfs.join('blackbox.log')
64 path = vfs.join('blackbox.log')
65 if path in filehandles:
65 if path in filehandles:
66 return filehandles[path]
66 return filehandles[path]
67 filehandles[path] = fp = vfs('blackbox.log', 'a')
67 filehandles[path] = fp = vfs('blackbox.log', 'a')
68 return fp
68 return fp
69
69
70 def _closelog(vfs):
70 def _closelog(vfs):
71 path = vfs.join('blackbox.log')
71 path = vfs.join('blackbox.log')
72 fp = filehandles[path]
72 fp = filehandles[path]
73 del filehandles[path]
73 del filehandles[path]
74 fp.close()
74 fp.close()
75
75
76 def wrapui(ui):
76 def wrapui(ui):
77 class blackboxui(ui.__class__):
77 class blackboxui(ui.__class__):
78 def __init__(self, src=None):
78 def __init__(self, src=None):
79 super(blackboxui, self).__init__(src)
79 super(blackboxui, self).__init__(src)
80 if src is None:
80 if src is None:
81 self._partialinit()
81 self._partialinit()
82 else:
82 else:
83 self._bbfp = getattr(src, '_bbfp', None)
83 self._bbfp = getattr(src, '_bbfp', None)
84 self._bbinlog = False
84 self._bbinlog = False
85 self._bbrepo = getattr(src, '_bbrepo', None)
85 self._bbrepo = getattr(src, '_bbrepo', None)
86 self._bbvfs = getattr(src, '_bbvfs', None)
86 self._bbvfs = getattr(src, '_bbvfs', None)
87
87
88 def _partialinit(self):
88 def _partialinit(self):
89 if util.safehasattr(self, '_bbvfs'):
89 if util.safehasattr(self, '_bbvfs'):
90 return
90 return
91 self._bbfp = None
91 self._bbfp = None
92 self._bbinlog = False
92 self._bbinlog = False
93 self._bbrepo = None
93 self._bbrepo = None
94 self._bbvfs = None
94 self._bbvfs = None
95
95
96 def copy(self):
96 def copy(self):
97 self._partialinit()
97 self._partialinit()
98 return self.__class__(self)
98 return self.__class__(self)
99
99
100 @util.propertycache
100 @util.propertycache
101 def track(self):
101 def track(self):
102 return self.configlist('blackbox', 'track', ['*'])
102 return self.configlist('blackbox', 'track', ['*'])
103
103
104 def _openlogfile(self):
104 def _openlogfile(self):
105 def rotate(oldpath, newpath):
105 def rotate(oldpath, newpath):
106 try:
106 try:
107 self._bbvfs.unlink(newpath)
107 self._bbvfs.unlink(newpath)
108 except OSError as err:
108 except OSError as err:
109 if err.errno != errno.ENOENT:
109 if err.errno != errno.ENOENT:
110 self.debug("warning: cannot remove '%s': %s\n" %
110 self.debug("warning: cannot remove '%s': %s\n" %
111 (newpath, err.strerror))
111 (newpath, err.strerror))
112 try:
112 try:
113 if newpath:
113 if newpath:
114 self._bbvfs.rename(oldpath, newpath)
114 self._bbvfs.rename(oldpath, newpath)
115 except OSError as err:
115 except OSError as err:
116 if err.errno != errno.ENOENT:
116 if err.errno != errno.ENOENT:
117 self.debug("warning: cannot rename '%s' to '%s': %s\n" %
117 self.debug("warning: cannot rename '%s' to '%s': %s\n" %
118 (newpath, oldpath, err.strerror))
118 (newpath, oldpath, err.strerror))
119
119
120 fp = _openlog(self._bbvfs)
120 fp = _openlog(self._bbvfs)
121 maxsize = self.configbytes('blackbox', 'maxsize', 1048576)
121 maxsize = self.configbytes('blackbox', 'maxsize', 1048576)
122 if maxsize > 0:
122 if maxsize > 0:
123 st = self._bbvfs.fstat(fp)
123 st = self._bbvfs.fstat(fp)
124 if st.st_size >= maxsize:
124 if st.st_size >= maxsize:
125 path = fp.name
125 path = fp.name
126 _closelog(self._bbvfs)
126 _closelog(self._bbvfs)
127 maxfiles = self.configint('blackbox', 'maxfiles', 7)
127 maxfiles = self.configint('blackbox', 'maxfiles', 7)
128 for i in xrange(maxfiles - 1, 1, -1):
128 for i in xrange(maxfiles - 1, 1, -1):
129 rotate(oldpath='%s.%d' % (path, i - 1),
129 rotate(oldpath='%s.%d' % (path, i - 1),
130 newpath='%s.%d' % (path, i))
130 newpath='%s.%d' % (path, i))
131 rotate(oldpath=path,
131 rotate(oldpath=path,
132 newpath=maxfiles > 0 and path + '.1')
132 newpath=maxfiles > 0 and path + '.1')
133 fp = _openlog(self._bbvfs)
133 fp = _openlog(self._bbvfs)
134 return fp
134 return fp
135
135
136 def _bbwrite(self, fmt, *args):
136 def _bbwrite(self, fmt, *args):
137 self._bbfp.write(fmt % args)
137 self._bbfp.write(fmt % args)
138 self._bbfp.flush()
138 self._bbfp.flush()
139
139
140 def log(self, event, *msg, **opts):
140 def log(self, event, *msg, **opts):
141 global lastui
141 global lastui
142 super(blackboxui, self).log(event, *msg, **opts)
142 super(blackboxui, self).log(event, *msg, **opts)
143 self._partialinit()
143 self._partialinit()
144
144
145 if not '*' in self.track and not event in self.track:
145 if not '*' in self.track and not event in self.track:
146 return
146 return
147
147
148 if self._bbfp:
148 if self._bbfp:
149 ui = self
149 ui = self
150 elif self._bbvfs:
150 elif self._bbvfs:
151 try:
151 try:
152 self._bbfp = self._openlogfile()
152 self._bbfp = self._openlogfile()
153 except (IOError, OSError) as err:
153 except (IOError, OSError) as err:
154 self.debug('warning: cannot write to blackbox.log: %s\n' %
154 self.debug('warning: cannot write to blackbox.log: %s\n' %
155 err.strerror)
155 err.strerror)
156 del self._bbvfs
156 del self._bbvfs
157 self._bbfp = None
157 self._bbfp = None
158 ui = self
158 ui = self
159 else:
159 else:
160 # certain ui instances exist outside the context of
160 # certain ui instances exist outside the context of
161 # a repo, so just default to the last blackbox that
161 # a repo, so just default to the last blackbox that
162 # was seen.
162 # was seen.
163 ui = lastui
163 ui = lastui
164
164
165 if not ui or not ui._bbfp:
165 if not ui or not ui._bbfp:
166 return
166 return
167 if not lastui or ui._bbrepo:
167 if not lastui or ui._bbrepo:
168 lastui = ui
168 lastui = ui
169 if ui._bbinlog:
169 if ui._bbinlog:
170 # recursion guard
170 # recursion guard
171 return
171 return
172 try:
172 try:
173 ui._bbinlog = True
173 ui._bbinlog = True
174 date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
174 date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
175 user = util.getuser()
175 user = util.getuser()
176 pid = str(util.getpid())
176 pid = str(util.getpid())
177 formattedmsg = msg[0] % msg[1:]
177 formattedmsg = msg[0] % msg[1:]
178 rev = '(unknown)'
178 rev = '(unknown)'
179 changed = ''
179 changed = ''
180 if ui._bbrepo:
180 if ui._bbrepo:
181 ctx = ui._bbrepo[None]
181 ctx = ui._bbrepo[None]
182 parents = ctx.parents()
182 parents = ctx.parents()
183 rev = ('+'.join([hex(p.node()) for p in parents]))
183 rev = ('+'.join([hex(p.node()) for p in parents]))
184 if (ui.configbool('blackbox', 'dirty', False) and (
184 if (ui.configbool('blackbox', 'dirty', False) and (
185 any(ui._bbrepo.status()) or
185 any(ui._bbrepo.status()) or
186 any(ctx.sub(s).dirty() for s in ctx.substate)
186 any(ctx.sub(s).dirty() for s in ctx.substate)
187 )):
187 )):
188 changed = '+'
188 changed = '+'
189 if ui.configbool('blackbox', 'logsource', False):
189 if ui.configbool('blackbox', 'logsource', False):
190 src = ' [%s]' % event
190 src = ' [%s]' % event
191 else:
191 else:
192 src = ''
192 src = ''
193 try:
193 try:
194 ui._bbwrite('%s %s @%s%s (%s)%s> %s',
194 ui._bbwrite('%s %s @%s%s (%s)%s> %s',
195 date, user, rev, changed, pid, src, formattedmsg)
195 date, user, rev, changed, pid, src, formattedmsg)
196 except IOError as err:
196 except IOError as err:
197 self.debug('warning: cannot write to blackbox.log: %s\n' %
197 self.debug('warning: cannot write to blackbox.log: %s\n' %
198 err.strerror)
198 err.strerror)
199 finally:
199 finally:
200 ui._bbinlog = False
200 ui._bbinlog = False
201
201
202 def setrepo(self, repo):
202 def setrepo(self, repo):
203 self._bbfp = None
203 self._bbfp = None
204 self._bbinlog = False
204 self._bbinlog = False
205 self._bbrepo = repo
205 self._bbrepo = repo
206 self._bbvfs = repo.vfs
206 self._bbvfs = repo.vfs
207
207
208 ui.__class__ = blackboxui
208 ui.__class__ = blackboxui
209 uimod.ui = blackboxui
209 uimod.ui = blackboxui
210
210
211 def uisetup(ui):
211 def uisetup(ui):
212 wrapui(ui)
212 wrapui(ui)
213
213
214 def reposetup(ui, repo):
214 def reposetup(ui, repo):
215 # During 'hg pull' a httppeer repo is created to represent the remote repo.
215 # During 'hg pull' a httppeer repo is created to represent the remote repo.
216 # It doesn't have a .hg directory to put a blackbox in, so we don't do
216 # It doesn't have a .hg directory to put a blackbox in, so we don't do
217 # the blackbox setup for it.
217 # the blackbox setup for it.
218 if not repo.local():
218 if not repo.local():
219 return
219 return
220
220
221 if util.safehasattr(ui, 'setrepo'):
221 if util.safehasattr(ui, 'setrepo'):
222 ui.setrepo(repo)
222 ui.setrepo(repo)
223
223
224 @command('^blackbox',
224 @command('^blackbox',
225 [('l', 'limit', 10, _('the number of events to show')),
225 [('l', 'limit', 10, _('the number of events to show')),
226 ],
226 ],
227 _('hg blackbox [OPTION]...'))
227 _('hg blackbox [OPTION]...'))
228 def blackbox(ui, repo, *revs, **opts):
228 def blackbox(ui, repo, *revs, **opts):
229 '''view the recent repository events
229 '''view the recent repository events
230 '''
230 '''
231
231
232 if not repo.vfs.exists('blackbox.log'):
232 if not repo.vfs.exists('blackbox.log'):
233 return
233 return
234
234
235 limit = opts.get('limit')
235 limit = opts.get('limit')
236 fp = repo.vfs('blackbox.log', 'r')
236 fp = repo.vfs('blackbox.log', 'r')
237 lines = fp.read().split('\n')
237 lines = fp.read().split('\n')
238
238
239 count = 0
239 count = 0
240 output = []
240 output = []
241 for line in reversed(lines):
241 for line in reversed(lines):
242 if count >= limit:
242 if count >= limit:
243 break
243 break
244
244
245 # count the commands by matching lines like: 2013/01/23 19:13:36 root>
245 # count the commands by matching lines like: 2013/01/23 19:13:36 root>
246 if re.match('^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
246 if re.match('^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
247 count += 1
247 count += 1
248 output.append(line)
248 output.append(line)
249
249
250 ui.status('\n'.join(reversed(output)))
250 ui.status('\n'.join(reversed(output)))
@@ -1,928 +1,928 b''
1 # bugzilla.py - bugzilla integration for mercurial
1 # bugzilla.py - bugzilla integration for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''hooks for integrating with the Bugzilla bug tracker
9 '''hooks for integrating with the Bugzilla bug tracker
10
10
11 This hook extension adds comments on bugs in Bugzilla when changesets
11 This hook extension adds comments on bugs in Bugzilla when changesets
12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 the Mercurial template mechanism.
13 the Mercurial template mechanism.
14
14
15 The bug references can optionally include an update for Bugzilla of the
15 The bug references can optionally include an update for Bugzilla of the
16 hours spent working on the bug. Bugs can also be marked fixed.
16 hours spent working on the bug. Bugs can also be marked fixed.
17
17
18 Three basic modes of access to Bugzilla are provided:
18 Three basic modes of access to Bugzilla are provided:
19
19
20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
21
21
22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
24
24
25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
26 using MySQL are supported. Requires Python MySQLdb.
26 using MySQL are supported. Requires Python MySQLdb.
27
27
28 Writing directly to the database is susceptible to schema changes, and
28 Writing directly to the database is susceptible to schema changes, and
29 relies on a Bugzilla contrib script to send out bug change
29 relies on a Bugzilla contrib script to send out bug change
30 notification emails. This script runs as the user running Mercurial,
30 notification emails. This script runs as the user running Mercurial,
31 must be run on the host with the Bugzilla install, and requires
31 must be run on the host with the Bugzilla install, and requires
32 permission to read Bugzilla configuration details and the necessary
32 permission to read Bugzilla configuration details and the necessary
33 MySQL user and password to have full access rights to the Bugzilla
33 MySQL user and password to have full access rights to the Bugzilla
34 database. For these reasons this access mode is now considered
34 database. For these reasons this access mode is now considered
35 deprecated, and will not be updated for new Bugzilla versions going
35 deprecated, and will not be updated for new Bugzilla versions going
36 forward. Only adding comments is supported in this access mode.
36 forward. Only adding comments is supported in this access mode.
37
37
38 Access via XMLRPC needs a Bugzilla username and password to be specified
38 Access via XMLRPC needs a Bugzilla username and password to be specified
39 in the configuration. Comments are added under that username. Since the
39 in the configuration. Comments are added under that username. Since the
40 configuration must be readable by all Mercurial users, it is recommended
40 configuration must be readable by all Mercurial users, it is recommended
41 that the rights of that user are restricted in Bugzilla to the minimum
41 that the rights of that user are restricted in Bugzilla to the minimum
42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
43
43
44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
45 email to the Bugzilla email interface to submit comments to bugs.
45 email to the Bugzilla email interface to submit comments to bugs.
46 The From: address in the email is set to the email address of the Mercurial
46 The From: address in the email is set to the email address of the Mercurial
47 user, so the comment appears to come from the Mercurial user. In the event
47 user, so the comment appears to come from the Mercurial user. In the event
48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
49 user, the email associated with the Bugzilla username used to log into
49 user, the email associated with the Bugzilla username used to log into
50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
51 works on all supported Bugzilla versions.
51 works on all supported Bugzilla versions.
52
52
53 Configuration items common to all access modes:
53 Configuration items common to all access modes:
54
54
55 bugzilla.version
55 bugzilla.version
56 The access type to use. Values recognized are:
56 The access type to use. Values recognized are:
57
57
58 :``xmlrpc``: Bugzilla XMLRPC interface.
58 :``xmlrpc``: Bugzilla XMLRPC interface.
59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
62 including 3.0.
62 including 3.0.
63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
64 including 2.18.
64 including 2.18.
65
65
66 bugzilla.regexp
66 bugzilla.regexp
67 Regular expression to match bug IDs for update in changeset commit message.
67 Regular expression to match bug IDs for update in changeset commit message.
68 It must contain one "()" named group ``<ids>`` containing the bug
68 It must contain one "()" named group ``<ids>`` containing the bug
69 IDs separated by non-digit characters. It may also contain
69 IDs separated by non-digit characters. It may also contain
70 a named group ``<hours>`` with a floating-point number giving the
70 a named group ``<hours>`` with a floating-point number giving the
71 hours worked on the bug. If no named groups are present, the first
71 hours worked on the bug. If no named groups are present, the first
72 "()" group is assumed to contain the bug IDs, and work time is not
72 "()" group is assumed to contain the bug IDs, and work time is not
73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
75 variations thereof, followed by an hours number prefixed by ``h`` or
75 variations thereof, followed by an hours number prefixed by ``h`` or
76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
77
77
78 bugzilla.fixregexp
78 bugzilla.fixregexp
79 Regular expression to match bug IDs for marking fixed in changeset
79 Regular expression to match bug IDs for marking fixed in changeset
80 commit message. This must contain a "()" named group ``<ids>` containing
80 commit message. This must contain a "()" named group ``<ids>` containing
81 the bug IDs separated by non-digit characters. It may also contain
81 the bug IDs separated by non-digit characters. It may also contain
82 a named group ``<hours>`` with a floating-point number giving the
82 a named group ``<hours>`` with a floating-point number giving the
83 hours worked on the bug. If no named groups are present, the first
83 hours worked on the bug. If no named groups are present, the first
84 "()" group is assumed to contain the bug IDs, and work time is not
84 "()" group is assumed to contain the bug IDs, and work time is not
85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
87 variations thereof, followed by an hours number prefixed by ``h`` or
87 variations thereof, followed by an hours number prefixed by ``h`` or
88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
89
89
90 bugzilla.fixstatus
90 bugzilla.fixstatus
91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
92
92
93 bugzilla.fixresolution
93 bugzilla.fixresolution
94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
95
95
96 bugzilla.style
96 bugzilla.style
97 The style file to use when formatting comments.
97 The style file to use when formatting comments.
98
98
99 bugzilla.template
99 bugzilla.template
100 Template to use when formatting comments. Overrides style if
100 Template to use when formatting comments. Overrides style if
101 specified. In addition to the usual Mercurial keywords, the
101 specified. In addition to the usual Mercurial keywords, the
102 extension specifies:
102 extension specifies:
103
103
104 :``{bug}``: The Bugzilla bug ID.
104 :``{bug}``: The Bugzilla bug ID.
105 :``{root}``: The full pathname of the Mercurial repository.
105 :``{root}``: The full pathname of the Mercurial repository.
106 :``{webroot}``: Stripped pathname of the Mercurial repository.
106 :``{webroot}``: Stripped pathname of the Mercurial repository.
107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
108
108
109 Default ``changeset {node|short} in repo {root} refers to bug
109 Default ``changeset {node|short} in repo {root} refers to bug
110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
111
111
112 bugzilla.strip
112 bugzilla.strip
113 The number of path separator characters to strip from the front of
113 The number of path separator characters to strip from the front of
114 the Mercurial repository path (``{root}`` in templates) to produce
114 the Mercurial repository path (``{root}`` in templates) to produce
115 ``{webroot}``. For example, a repository with ``{root}``
115 ``{webroot}``. For example, a repository with ``{root}``
116 ``/var/local/my-project`` with a strip of 2 gives a value for
116 ``/var/local/my-project`` with a strip of 2 gives a value for
117 ``{webroot}`` of ``my-project``. Default 0.
117 ``{webroot}`` of ``my-project``. Default 0.
118
118
119 web.baseurl
119 web.baseurl
120 Base URL for browsing Mercurial repositories. Referenced from
120 Base URL for browsing Mercurial repositories. Referenced from
121 templates as ``{hgweb}``.
121 templates as ``{hgweb}``.
122
122
123 Configuration items common to XMLRPC+email and MySQL access modes:
123 Configuration items common to XMLRPC+email and MySQL access modes:
124
124
125 bugzilla.usermap
125 bugzilla.usermap
126 Path of file containing Mercurial committer email to Bugzilla user email
126 Path of file containing Mercurial committer email to Bugzilla user email
127 mappings. If specified, the file should contain one mapping per
127 mappings. If specified, the file should contain one mapping per
128 line::
128 line::
129
129
130 committer = Bugzilla user
130 committer = Bugzilla user
131
131
132 See also the ``[usermap]`` section.
132 See also the ``[usermap]`` section.
133
133
134 The ``[usermap]`` section is used to specify mappings of Mercurial
134 The ``[usermap]`` section is used to specify mappings of Mercurial
135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
136 Contains entries of the form ``committer = Bugzilla user``.
136 Contains entries of the form ``committer = Bugzilla user``.
137
137
138 XMLRPC access mode configuration:
138 XMLRPC access mode configuration:
139
139
140 bugzilla.bzurl
140 bugzilla.bzurl
141 The base URL for the Bugzilla installation.
141 The base URL for the Bugzilla installation.
142 Default ``http://localhost/bugzilla``.
142 Default ``http://localhost/bugzilla``.
143
143
144 bugzilla.user
144 bugzilla.user
145 The username to use to log into Bugzilla via XMLRPC. Default
145 The username to use to log into Bugzilla via XMLRPC. Default
146 ``bugs``.
146 ``bugs``.
147
147
148 bugzilla.password
148 bugzilla.password
149 The password for Bugzilla login.
149 The password for Bugzilla login.
150
150
151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
152 and also:
152 and also:
153
153
154 bugzilla.bzemail
154 bugzilla.bzemail
155 The Bugzilla email address.
155 The Bugzilla email address.
156
156
157 In addition, the Mercurial email settings must be configured. See the
157 In addition, the Mercurial email settings must be configured. See the
158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
159
159
160 MySQL access mode configuration:
160 MySQL access mode configuration:
161
161
162 bugzilla.host
162 bugzilla.host
163 Hostname of the MySQL server holding the Bugzilla database.
163 Hostname of the MySQL server holding the Bugzilla database.
164 Default ``localhost``.
164 Default ``localhost``.
165
165
166 bugzilla.db
166 bugzilla.db
167 Name of the Bugzilla database in MySQL. Default ``bugs``.
167 Name of the Bugzilla database in MySQL. Default ``bugs``.
168
168
169 bugzilla.user
169 bugzilla.user
170 Username to use to access MySQL server. Default ``bugs``.
170 Username to use to access MySQL server. Default ``bugs``.
171
171
172 bugzilla.password
172 bugzilla.password
173 Password to use to access MySQL server.
173 Password to use to access MySQL server.
174
174
175 bugzilla.timeout
175 bugzilla.timeout
176 Database connection timeout (seconds). Default 5.
176 Database connection timeout (seconds). Default 5.
177
177
178 bugzilla.bzuser
178 bugzilla.bzuser
179 Fallback Bugzilla user name to record comments with, if changeset
179 Fallback Bugzilla user name to record comments with, if changeset
180 committer cannot be found as a Bugzilla user.
180 committer cannot be found as a Bugzilla user.
181
181
182 bugzilla.bzdir
182 bugzilla.bzdir
183 Bugzilla install directory. Used by default notify. Default
183 Bugzilla install directory. Used by default notify. Default
184 ``/var/www/html/bugzilla``.
184 ``/var/www/html/bugzilla``.
185
185
186 bugzilla.notify
186 bugzilla.notify
187 The command to run to get Bugzilla to send bug change notification
187 The command to run to get Bugzilla to send bug change notification
188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
189 id) and ``user`` (committer bugzilla email). Default depends on
189 id) and ``user`` (committer bugzilla email). Default depends on
190 version; from 2.18 it is "cd %(bzdir)s && perl -T
190 version; from 2.18 it is "cd %(bzdir)s && perl -T
191 contrib/sendbugmail.pl %(id)s %(user)s".
191 contrib/sendbugmail.pl %(id)s %(user)s".
192
192
193 Activating the extension::
193 Activating the extension::
194
194
195 [extensions]
195 [extensions]
196 bugzilla =
196 bugzilla =
197
197
198 [hooks]
198 [hooks]
199 # run bugzilla hook on every change pulled or pushed in here
199 # run bugzilla hook on every change pulled or pushed in here
200 incoming.bugzilla = python:hgext.bugzilla.hook
200 incoming.bugzilla = python:hgext.bugzilla.hook
201
201
202 Example configurations:
202 Example configurations:
203
203
204 XMLRPC example configuration. This uses the Bugzilla at
204 XMLRPC example configuration. This uses the Bugzilla at
205 ``http://my-project.org/bugzilla``, logging in as user
205 ``http://my-project.org/bugzilla``, logging in as user
206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
208 with a web interface at ``http://my-project.org/hg``. ::
208 with a web interface at ``http://my-project.org/hg``. ::
209
209
210 [bugzilla]
210 [bugzilla]
211 bzurl=http://my-project.org/bugzilla
211 bzurl=http://my-project.org/bugzilla
212 user=bugmail@my-project.org
212 user=bugmail@my-project.org
213 password=plugh
213 password=plugh
214 version=xmlrpc
214 version=xmlrpc
215 template=Changeset {node|short} in {root|basename}.
215 template=Changeset {node|short} in {root|basename}.
216 {hgweb}/{webroot}/rev/{node|short}\\n
216 {hgweb}/{webroot}/rev/{node|short}\\n
217 {desc}\\n
217 {desc}\\n
218 strip=5
218 strip=5
219
219
220 [web]
220 [web]
221 baseurl=http://my-project.org/hg
221 baseurl=http://my-project.org/hg
222
222
223 XMLRPC+email example configuration. This uses the Bugzilla at
223 XMLRPC+email example configuration. This uses the Bugzilla at
224 ``http://my-project.org/bugzilla``, logging in as user
224 ``http://my-project.org/bugzilla``, logging in as user
225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
227 with a web interface at ``http://my-project.org/hg``. Bug comments
227 with a web interface at ``http://my-project.org/hg``. Bug comments
228 are sent to the Bugzilla email address
228 are sent to the Bugzilla email address
229 ``bugzilla@my-project.org``. ::
229 ``bugzilla@my-project.org``. ::
230
230
231 [bugzilla]
231 [bugzilla]
232 bzurl=http://my-project.org/bugzilla
232 bzurl=http://my-project.org/bugzilla
233 user=bugmail@my-project.org
233 user=bugmail@my-project.org
234 password=plugh
234 password=plugh
235 version=xmlrpc+email
235 version=xmlrpc+email
236 bzemail=bugzilla@my-project.org
236 bzemail=bugzilla@my-project.org
237 template=Changeset {node|short} in {root|basename}.
237 template=Changeset {node|short} in {root|basename}.
238 {hgweb}/{webroot}/rev/{node|short}\\n
238 {hgweb}/{webroot}/rev/{node|short}\\n
239 {desc}\\n
239 {desc}\\n
240 strip=5
240 strip=5
241
241
242 [web]
242 [web]
243 baseurl=http://my-project.org/hg
243 baseurl=http://my-project.org/hg
244
244
245 [usermap]
245 [usermap]
246 user@emaildomain.com=user.name@bugzilladomain.com
246 user@emaildomain.com=user.name@bugzilladomain.com
247
247
248 MySQL example configuration. This has a local Bugzilla 3.2 installation
248 MySQL example configuration. This has a local Bugzilla 3.2 installation
249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
250 the Bugzilla database name is ``bugs`` and MySQL is
250 the Bugzilla database name is ``bugs`` and MySQL is
251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
253 with a web interface at ``http://my-project.org/hg``. ::
253 with a web interface at ``http://my-project.org/hg``. ::
254
254
255 [bugzilla]
255 [bugzilla]
256 host=localhost
256 host=localhost
257 password=XYZZY
257 password=XYZZY
258 version=3.0
258 version=3.0
259 bzuser=unknown@domain.com
259 bzuser=unknown@domain.com
260 bzdir=/opt/bugzilla-3.2
260 bzdir=/opt/bugzilla-3.2
261 template=Changeset {node|short} in {root|basename}.
261 template=Changeset {node|short} in {root|basename}.
262 {hgweb}/{webroot}/rev/{node|short}\\n
262 {hgweb}/{webroot}/rev/{node|short}\\n
263 {desc}\\n
263 {desc}\\n
264 strip=5
264 strip=5
265
265
266 [web]
266 [web]
267 baseurl=http://my-project.org/hg
267 baseurl=http://my-project.org/hg
268
268
269 [usermap]
269 [usermap]
270 user@emaildomain.com=user.name@bugzilladomain.com
270 user@emaildomain.com=user.name@bugzilladomain.com
271
271
272 All the above add a comment to the Bugzilla bug record of the form::
272 All the above add a comment to the Bugzilla bug record of the form::
273
273
274 Changeset 3b16791d6642 in repository-name.
274 Changeset 3b16791d6642 in repository-name.
275 http://my-project.org/hg/repository-name/rev/3b16791d6642
275 http://my-project.org/hg/repository-name/rev/3b16791d6642
276
276
277 Changeset commit comment. Bug 1234.
277 Changeset commit comment. Bug 1234.
278 '''
278 '''
279
279
280 from __future__ import absolute_import
280 from __future__ import absolute_import
281
281
282 import re
282 import re
283 import time
283 import time
284
284
285 from mercurial.i18n import _
285 from mercurial.i18n import _
286 from mercurial.node import short
286 from mercurial.node import short
287 from mercurial import (
287 from mercurial import (
288 cmdutil,
288 cmdutil,
289 error,
289 error,
290 mail,
290 mail,
291 util,
291 util,
292 )
292 )
293
293
294 urlparse = util.urlparse
294 urlparse = util.urlparse
295 xmlrpclib = util.xmlrpclib
295 xmlrpclib = util.xmlrpclib
296
296
297 # Note for extension authors: ONLY specify testedwith = 'internal' for
297 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
298 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
298 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
299 # be specifying the version(s) of Mercurial they are tested with, or
299 # be specifying the version(s) of Mercurial they are tested with, or
300 # leave the attribute unspecified.
300 # leave the attribute unspecified.
301 testedwith = 'internal'
301 testedwith = 'ships-with-hg-core'
302
302
303 class bzaccess(object):
303 class bzaccess(object):
304 '''Base class for access to Bugzilla.'''
304 '''Base class for access to Bugzilla.'''
305
305
306 def __init__(self, ui):
306 def __init__(self, ui):
307 self.ui = ui
307 self.ui = ui
308 usermap = self.ui.config('bugzilla', 'usermap')
308 usermap = self.ui.config('bugzilla', 'usermap')
309 if usermap:
309 if usermap:
310 self.ui.readconfig(usermap, sections=['usermap'])
310 self.ui.readconfig(usermap, sections=['usermap'])
311
311
312 def map_committer(self, user):
312 def map_committer(self, user):
313 '''map name of committer to Bugzilla user name.'''
313 '''map name of committer to Bugzilla user name.'''
314 for committer, bzuser in self.ui.configitems('usermap'):
314 for committer, bzuser in self.ui.configitems('usermap'):
315 if committer.lower() == user.lower():
315 if committer.lower() == user.lower():
316 return bzuser
316 return bzuser
317 return user
317 return user
318
318
319 # Methods to be implemented by access classes.
319 # Methods to be implemented by access classes.
320 #
320 #
321 # 'bugs' is a dict keyed on bug id, where values are a dict holding
321 # 'bugs' is a dict keyed on bug id, where values are a dict holding
322 # updates to bug state. Recognized dict keys are:
322 # updates to bug state. Recognized dict keys are:
323 #
323 #
324 # 'hours': Value, float containing work hours to be updated.
324 # 'hours': Value, float containing work hours to be updated.
325 # 'fix': If key present, bug is to be marked fixed. Value ignored.
325 # 'fix': If key present, bug is to be marked fixed. Value ignored.
326
326
327 def filter_real_bug_ids(self, bugs):
327 def filter_real_bug_ids(self, bugs):
328 '''remove bug IDs that do not exist in Bugzilla from bugs.'''
328 '''remove bug IDs that do not exist in Bugzilla from bugs.'''
329 pass
329 pass
330
330
331 def filter_cset_known_bug_ids(self, node, bugs):
331 def filter_cset_known_bug_ids(self, node, bugs):
332 '''remove bug IDs where node occurs in comment text from bugs.'''
332 '''remove bug IDs where node occurs in comment text from bugs.'''
333 pass
333 pass
334
334
335 def updatebug(self, bugid, newstate, text, committer):
335 def updatebug(self, bugid, newstate, text, committer):
336 '''update the specified bug. Add comment text and set new states.
336 '''update the specified bug. Add comment text and set new states.
337
337
338 If possible add the comment as being from the committer of
338 If possible add the comment as being from the committer of
339 the changeset. Otherwise use the default Bugzilla user.
339 the changeset. Otherwise use the default Bugzilla user.
340 '''
340 '''
341 pass
341 pass
342
342
343 def notify(self, bugs, committer):
343 def notify(self, bugs, committer):
344 '''Force sending of Bugzilla notification emails.
344 '''Force sending of Bugzilla notification emails.
345
345
346 Only required if the access method does not trigger notification
346 Only required if the access method does not trigger notification
347 emails automatically.
347 emails automatically.
348 '''
348 '''
349 pass
349 pass
350
350
351 # Bugzilla via direct access to MySQL database.
351 # Bugzilla via direct access to MySQL database.
352 class bzmysql(bzaccess):
352 class bzmysql(bzaccess):
353 '''Support for direct MySQL access to Bugzilla.
353 '''Support for direct MySQL access to Bugzilla.
354
354
355 The earliest Bugzilla version this is tested with is version 2.16.
355 The earliest Bugzilla version this is tested with is version 2.16.
356
356
357 If your Bugzilla is version 3.4 or above, you are strongly
357 If your Bugzilla is version 3.4 or above, you are strongly
358 recommended to use the XMLRPC access method instead.
358 recommended to use the XMLRPC access method instead.
359 '''
359 '''
360
360
361 @staticmethod
361 @staticmethod
362 def sql_buglist(ids):
362 def sql_buglist(ids):
363 '''return SQL-friendly list of bug ids'''
363 '''return SQL-friendly list of bug ids'''
364 return '(' + ','.join(map(str, ids)) + ')'
364 return '(' + ','.join(map(str, ids)) + ')'
365
365
366 _MySQLdb = None
366 _MySQLdb = None
367
367
368 def __init__(self, ui):
368 def __init__(self, ui):
369 try:
369 try:
370 import MySQLdb as mysql
370 import MySQLdb as mysql
371 bzmysql._MySQLdb = mysql
371 bzmysql._MySQLdb = mysql
372 except ImportError as err:
372 except ImportError as err:
373 raise error.Abort(_('python mysql support not available: %s') % err)
373 raise error.Abort(_('python mysql support not available: %s') % err)
374
374
375 bzaccess.__init__(self, ui)
375 bzaccess.__init__(self, ui)
376
376
377 host = self.ui.config('bugzilla', 'host', 'localhost')
377 host = self.ui.config('bugzilla', 'host', 'localhost')
378 user = self.ui.config('bugzilla', 'user', 'bugs')
378 user = self.ui.config('bugzilla', 'user', 'bugs')
379 passwd = self.ui.config('bugzilla', 'password')
379 passwd = self.ui.config('bugzilla', 'password')
380 db = self.ui.config('bugzilla', 'db', 'bugs')
380 db = self.ui.config('bugzilla', 'db', 'bugs')
381 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
381 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
382 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
382 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
383 (host, db, user, '*' * len(passwd)))
383 (host, db, user, '*' * len(passwd)))
384 self.conn = bzmysql._MySQLdb.connect(host=host,
384 self.conn = bzmysql._MySQLdb.connect(host=host,
385 user=user, passwd=passwd,
385 user=user, passwd=passwd,
386 db=db,
386 db=db,
387 connect_timeout=timeout)
387 connect_timeout=timeout)
388 self.cursor = self.conn.cursor()
388 self.cursor = self.conn.cursor()
389 self.longdesc_id = self.get_longdesc_id()
389 self.longdesc_id = self.get_longdesc_id()
390 self.user_ids = {}
390 self.user_ids = {}
391 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
391 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
392
392
393 def run(self, *args, **kwargs):
393 def run(self, *args, **kwargs):
394 '''run a query.'''
394 '''run a query.'''
395 self.ui.note(_('query: %s %s\n') % (args, kwargs))
395 self.ui.note(_('query: %s %s\n') % (args, kwargs))
396 try:
396 try:
397 self.cursor.execute(*args, **kwargs)
397 self.cursor.execute(*args, **kwargs)
398 except bzmysql._MySQLdb.MySQLError:
398 except bzmysql._MySQLdb.MySQLError:
399 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
399 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
400 raise
400 raise
401
401
402 def get_longdesc_id(self):
402 def get_longdesc_id(self):
403 '''get identity of longdesc field'''
403 '''get identity of longdesc field'''
404 self.run('select fieldid from fielddefs where name = "longdesc"')
404 self.run('select fieldid from fielddefs where name = "longdesc"')
405 ids = self.cursor.fetchall()
405 ids = self.cursor.fetchall()
406 if len(ids) != 1:
406 if len(ids) != 1:
407 raise error.Abort(_('unknown database schema'))
407 raise error.Abort(_('unknown database schema'))
408 return ids[0][0]
408 return ids[0][0]
409
409
410 def filter_real_bug_ids(self, bugs):
410 def filter_real_bug_ids(self, bugs):
411 '''filter not-existing bugs from set.'''
411 '''filter not-existing bugs from set.'''
412 self.run('select bug_id from bugs where bug_id in %s' %
412 self.run('select bug_id from bugs where bug_id in %s' %
413 bzmysql.sql_buglist(bugs.keys()))
413 bzmysql.sql_buglist(bugs.keys()))
414 existing = [id for (id,) in self.cursor.fetchall()]
414 existing = [id for (id,) in self.cursor.fetchall()]
415 for id in bugs.keys():
415 for id in bugs.keys():
416 if id not in existing:
416 if id not in existing:
417 self.ui.status(_('bug %d does not exist\n') % id)
417 self.ui.status(_('bug %d does not exist\n') % id)
418 del bugs[id]
418 del bugs[id]
419
419
420 def filter_cset_known_bug_ids(self, node, bugs):
420 def filter_cset_known_bug_ids(self, node, bugs):
421 '''filter bug ids that already refer to this changeset from set.'''
421 '''filter bug ids that already refer to this changeset from set.'''
422 self.run('''select bug_id from longdescs where
422 self.run('''select bug_id from longdescs where
423 bug_id in %s and thetext like "%%%s%%"''' %
423 bug_id in %s and thetext like "%%%s%%"''' %
424 (bzmysql.sql_buglist(bugs.keys()), short(node)))
424 (bzmysql.sql_buglist(bugs.keys()), short(node)))
425 for (id,) in self.cursor.fetchall():
425 for (id,) in self.cursor.fetchall():
426 self.ui.status(_('bug %d already knows about changeset %s\n') %
426 self.ui.status(_('bug %d already knows about changeset %s\n') %
427 (id, short(node)))
427 (id, short(node)))
428 del bugs[id]
428 del bugs[id]
429
429
430 def notify(self, bugs, committer):
430 def notify(self, bugs, committer):
431 '''tell bugzilla to send mail.'''
431 '''tell bugzilla to send mail.'''
432 self.ui.status(_('telling bugzilla to send mail:\n'))
432 self.ui.status(_('telling bugzilla to send mail:\n'))
433 (user, userid) = self.get_bugzilla_user(committer)
433 (user, userid) = self.get_bugzilla_user(committer)
434 for id in bugs.keys():
434 for id in bugs.keys():
435 self.ui.status(_(' bug %s\n') % id)
435 self.ui.status(_(' bug %s\n') % id)
436 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
436 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
437 bzdir = self.ui.config('bugzilla', 'bzdir',
437 bzdir = self.ui.config('bugzilla', 'bzdir',
438 '/var/www/html/bugzilla')
438 '/var/www/html/bugzilla')
439 try:
439 try:
440 # Backwards-compatible with old notify string, which
440 # Backwards-compatible with old notify string, which
441 # took one string. This will throw with a new format
441 # took one string. This will throw with a new format
442 # string.
442 # string.
443 cmd = cmdfmt % id
443 cmd = cmdfmt % id
444 except TypeError:
444 except TypeError:
445 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
445 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
446 self.ui.note(_('running notify command %s\n') % cmd)
446 self.ui.note(_('running notify command %s\n') % cmd)
447 fp = util.popen('(%s) 2>&1' % cmd)
447 fp = util.popen('(%s) 2>&1' % cmd)
448 out = fp.read()
448 out = fp.read()
449 ret = fp.close()
449 ret = fp.close()
450 if ret:
450 if ret:
451 self.ui.warn(out)
451 self.ui.warn(out)
452 raise error.Abort(_('bugzilla notify command %s') %
452 raise error.Abort(_('bugzilla notify command %s') %
453 util.explainexit(ret)[0])
453 util.explainexit(ret)[0])
454 self.ui.status(_('done\n'))
454 self.ui.status(_('done\n'))
455
455
456 def get_user_id(self, user):
456 def get_user_id(self, user):
457 '''look up numeric bugzilla user id.'''
457 '''look up numeric bugzilla user id.'''
458 try:
458 try:
459 return self.user_ids[user]
459 return self.user_ids[user]
460 except KeyError:
460 except KeyError:
461 try:
461 try:
462 userid = int(user)
462 userid = int(user)
463 except ValueError:
463 except ValueError:
464 self.ui.note(_('looking up user %s\n') % user)
464 self.ui.note(_('looking up user %s\n') % user)
465 self.run('''select userid from profiles
465 self.run('''select userid from profiles
466 where login_name like %s''', user)
466 where login_name like %s''', user)
467 all = self.cursor.fetchall()
467 all = self.cursor.fetchall()
468 if len(all) != 1:
468 if len(all) != 1:
469 raise KeyError(user)
469 raise KeyError(user)
470 userid = int(all[0][0])
470 userid = int(all[0][0])
471 self.user_ids[user] = userid
471 self.user_ids[user] = userid
472 return userid
472 return userid
473
473
474 def get_bugzilla_user(self, committer):
474 def get_bugzilla_user(self, committer):
475 '''See if committer is a registered bugzilla user. Return
475 '''See if committer is a registered bugzilla user. Return
476 bugzilla username and userid if so. If not, return default
476 bugzilla username and userid if so. If not, return default
477 bugzilla username and userid.'''
477 bugzilla username and userid.'''
478 user = self.map_committer(committer)
478 user = self.map_committer(committer)
479 try:
479 try:
480 userid = self.get_user_id(user)
480 userid = self.get_user_id(user)
481 except KeyError:
481 except KeyError:
482 try:
482 try:
483 defaultuser = self.ui.config('bugzilla', 'bzuser')
483 defaultuser = self.ui.config('bugzilla', 'bzuser')
484 if not defaultuser:
484 if not defaultuser:
485 raise error.Abort(_('cannot find bugzilla user id for %s') %
485 raise error.Abort(_('cannot find bugzilla user id for %s') %
486 user)
486 user)
487 userid = self.get_user_id(defaultuser)
487 userid = self.get_user_id(defaultuser)
488 user = defaultuser
488 user = defaultuser
489 except KeyError:
489 except KeyError:
490 raise error.Abort(_('cannot find bugzilla user id for %s or %s')
490 raise error.Abort(_('cannot find bugzilla user id for %s or %s')
491 % (user, defaultuser))
491 % (user, defaultuser))
492 return (user, userid)
492 return (user, userid)
493
493
494 def updatebug(self, bugid, newstate, text, committer):
494 def updatebug(self, bugid, newstate, text, committer):
495 '''update bug state with comment text.
495 '''update bug state with comment text.
496
496
497 Try adding comment as committer of changeset, otherwise as
497 Try adding comment as committer of changeset, otherwise as
498 default bugzilla user.'''
498 default bugzilla user.'''
499 if len(newstate) > 0:
499 if len(newstate) > 0:
500 self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
500 self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
501
501
502 (user, userid) = self.get_bugzilla_user(committer)
502 (user, userid) = self.get_bugzilla_user(committer)
503 now = time.strftime('%Y-%m-%d %H:%M:%S')
503 now = time.strftime('%Y-%m-%d %H:%M:%S')
504 self.run('''insert into longdescs
504 self.run('''insert into longdescs
505 (bug_id, who, bug_when, thetext)
505 (bug_id, who, bug_when, thetext)
506 values (%s, %s, %s, %s)''',
506 values (%s, %s, %s, %s)''',
507 (bugid, userid, now, text))
507 (bugid, userid, now, text))
508 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
508 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
509 values (%s, %s, %s, %s)''',
509 values (%s, %s, %s, %s)''',
510 (bugid, userid, now, self.longdesc_id))
510 (bugid, userid, now, self.longdesc_id))
511 self.conn.commit()
511 self.conn.commit()
512
512
513 class bzmysql_2_18(bzmysql):
513 class bzmysql_2_18(bzmysql):
514 '''support for bugzilla 2.18 series.'''
514 '''support for bugzilla 2.18 series.'''
515
515
516 def __init__(self, ui):
516 def __init__(self, ui):
517 bzmysql.__init__(self, ui)
517 bzmysql.__init__(self, ui)
518 self.default_notify = \
518 self.default_notify = \
519 "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
519 "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
520
520
521 class bzmysql_3_0(bzmysql_2_18):
521 class bzmysql_3_0(bzmysql_2_18):
522 '''support for bugzilla 3.0 series.'''
522 '''support for bugzilla 3.0 series.'''
523
523
524 def __init__(self, ui):
524 def __init__(self, ui):
525 bzmysql_2_18.__init__(self, ui)
525 bzmysql_2_18.__init__(self, ui)
526
526
527 def get_longdesc_id(self):
527 def get_longdesc_id(self):
528 '''get identity of longdesc field'''
528 '''get identity of longdesc field'''
529 self.run('select id from fielddefs where name = "longdesc"')
529 self.run('select id from fielddefs where name = "longdesc"')
530 ids = self.cursor.fetchall()
530 ids = self.cursor.fetchall()
531 if len(ids) != 1:
531 if len(ids) != 1:
532 raise error.Abort(_('unknown database schema'))
532 raise error.Abort(_('unknown database schema'))
533 return ids[0][0]
533 return ids[0][0]
534
534
535 # Bugzilla via XMLRPC interface.
535 # Bugzilla via XMLRPC interface.
536
536
537 class cookietransportrequest(object):
537 class cookietransportrequest(object):
538 """A Transport request method that retains cookies over its lifetime.
538 """A Transport request method that retains cookies over its lifetime.
539
539
540 The regular xmlrpclib transports ignore cookies. Which causes
540 The regular xmlrpclib transports ignore cookies. Which causes
541 a bit of a problem when you need a cookie-based login, as with
541 a bit of a problem when you need a cookie-based login, as with
542 the Bugzilla XMLRPC interface prior to 4.4.3.
542 the Bugzilla XMLRPC interface prior to 4.4.3.
543
543
544 So this is a helper for defining a Transport which looks for
544 So this is a helper for defining a Transport which looks for
545 cookies being set in responses and saves them to add to all future
545 cookies being set in responses and saves them to add to all future
546 requests.
546 requests.
547 """
547 """
548
548
549 # Inspiration drawn from
549 # Inspiration drawn from
550 # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
550 # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
551 # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
551 # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
552
552
553 cookies = []
553 cookies = []
554 def send_cookies(self, connection):
554 def send_cookies(self, connection):
555 if self.cookies:
555 if self.cookies:
556 for cookie in self.cookies:
556 for cookie in self.cookies:
557 connection.putheader("Cookie", cookie)
557 connection.putheader("Cookie", cookie)
558
558
559 def request(self, host, handler, request_body, verbose=0):
559 def request(self, host, handler, request_body, verbose=0):
560 self.verbose = verbose
560 self.verbose = verbose
561 self.accept_gzip_encoding = False
561 self.accept_gzip_encoding = False
562
562
563 # issue XML-RPC request
563 # issue XML-RPC request
564 h = self.make_connection(host)
564 h = self.make_connection(host)
565 if verbose:
565 if verbose:
566 h.set_debuglevel(1)
566 h.set_debuglevel(1)
567
567
568 self.send_request(h, handler, request_body)
568 self.send_request(h, handler, request_body)
569 self.send_host(h, host)
569 self.send_host(h, host)
570 self.send_cookies(h)
570 self.send_cookies(h)
571 self.send_user_agent(h)
571 self.send_user_agent(h)
572 self.send_content(h, request_body)
572 self.send_content(h, request_body)
573
573
574 # Deal with differences between Python 2.4-2.6 and 2.7.
574 # Deal with differences between Python 2.4-2.6 and 2.7.
575 # In the former h is a HTTP(S). In the latter it's a
575 # In the former h is a HTTP(S). In the latter it's a
576 # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
576 # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
577 # HTTP(S) has an underlying HTTP(S)Connection, so extract
577 # HTTP(S) has an underlying HTTP(S)Connection, so extract
578 # that and use it.
578 # that and use it.
579 try:
579 try:
580 response = h.getresponse()
580 response = h.getresponse()
581 except AttributeError:
581 except AttributeError:
582 response = h._conn.getresponse()
582 response = h._conn.getresponse()
583
583
584 # Add any cookie definitions to our list.
584 # Add any cookie definitions to our list.
585 for header in response.msg.getallmatchingheaders("Set-Cookie"):
585 for header in response.msg.getallmatchingheaders("Set-Cookie"):
586 val = header.split(": ", 1)[1]
586 val = header.split(": ", 1)[1]
587 cookie = val.split(";", 1)[0]
587 cookie = val.split(";", 1)[0]
588 self.cookies.append(cookie)
588 self.cookies.append(cookie)
589
589
590 if response.status != 200:
590 if response.status != 200:
591 raise xmlrpclib.ProtocolError(host + handler, response.status,
591 raise xmlrpclib.ProtocolError(host + handler, response.status,
592 response.reason, response.msg.headers)
592 response.reason, response.msg.headers)
593
593
594 payload = response.read()
594 payload = response.read()
595 parser, unmarshaller = self.getparser()
595 parser, unmarshaller = self.getparser()
596 parser.feed(payload)
596 parser.feed(payload)
597 parser.close()
597 parser.close()
598
598
599 return unmarshaller.close()
599 return unmarshaller.close()
600
600
601 # The explicit calls to the underlying xmlrpclib __init__() methods are
601 # The explicit calls to the underlying xmlrpclib __init__() methods are
602 # necessary. The xmlrpclib.Transport classes are old-style classes, and
602 # necessary. The xmlrpclib.Transport classes are old-style classes, and
603 # it turns out their __init__() doesn't get called when doing multiple
603 # it turns out their __init__() doesn't get called when doing multiple
604 # inheritance with a new-style class.
604 # inheritance with a new-style class.
605 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
605 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
606 def __init__(self, use_datetime=0):
606 def __init__(self, use_datetime=0):
607 if util.safehasattr(xmlrpclib.Transport, "__init__"):
607 if util.safehasattr(xmlrpclib.Transport, "__init__"):
608 xmlrpclib.Transport.__init__(self, use_datetime)
608 xmlrpclib.Transport.__init__(self, use_datetime)
609
609
610 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
610 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
611 def __init__(self, use_datetime=0):
611 def __init__(self, use_datetime=0):
612 if util.safehasattr(xmlrpclib.Transport, "__init__"):
612 if util.safehasattr(xmlrpclib.Transport, "__init__"):
613 xmlrpclib.SafeTransport.__init__(self, use_datetime)
613 xmlrpclib.SafeTransport.__init__(self, use_datetime)
614
614
615 class bzxmlrpc(bzaccess):
615 class bzxmlrpc(bzaccess):
616 """Support for access to Bugzilla via the Bugzilla XMLRPC API.
616 """Support for access to Bugzilla via the Bugzilla XMLRPC API.
617
617
618 Requires a minimum Bugzilla version 3.4.
618 Requires a minimum Bugzilla version 3.4.
619 """
619 """
620
620
621 def __init__(self, ui):
621 def __init__(self, ui):
622 bzaccess.__init__(self, ui)
622 bzaccess.__init__(self, ui)
623
623
624 bzweb = self.ui.config('bugzilla', 'bzurl',
624 bzweb = self.ui.config('bugzilla', 'bzurl',
625 'http://localhost/bugzilla/')
625 'http://localhost/bugzilla/')
626 bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
626 bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
627
627
628 user = self.ui.config('bugzilla', 'user', 'bugs')
628 user = self.ui.config('bugzilla', 'user', 'bugs')
629 passwd = self.ui.config('bugzilla', 'password')
629 passwd = self.ui.config('bugzilla', 'password')
630
630
631 self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
631 self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
632 self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
632 self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
633 'FIXED')
633 'FIXED')
634
634
635 self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
635 self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
636 ver = self.bzproxy.Bugzilla.version()['version'].split('.')
636 ver = self.bzproxy.Bugzilla.version()['version'].split('.')
637 self.bzvermajor = int(ver[0])
637 self.bzvermajor = int(ver[0])
638 self.bzverminor = int(ver[1])
638 self.bzverminor = int(ver[1])
639 login = self.bzproxy.User.login({'login': user, 'password': passwd,
639 login = self.bzproxy.User.login({'login': user, 'password': passwd,
640 'restrict_login': True})
640 'restrict_login': True})
641 self.bztoken = login.get('token', '')
641 self.bztoken = login.get('token', '')
642
642
643 def transport(self, uri):
643 def transport(self, uri):
644 if urlparse.urlparse(uri, "http")[0] == "https":
644 if urlparse.urlparse(uri, "http")[0] == "https":
645 return cookiesafetransport()
645 return cookiesafetransport()
646 else:
646 else:
647 return cookietransport()
647 return cookietransport()
648
648
649 def get_bug_comments(self, id):
649 def get_bug_comments(self, id):
650 """Return a string with all comment text for a bug."""
650 """Return a string with all comment text for a bug."""
651 c = self.bzproxy.Bug.comments({'ids': [id],
651 c = self.bzproxy.Bug.comments({'ids': [id],
652 'include_fields': ['text'],
652 'include_fields': ['text'],
653 'token': self.bztoken})
653 'token': self.bztoken})
654 return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
654 return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
655
655
656 def filter_real_bug_ids(self, bugs):
656 def filter_real_bug_ids(self, bugs):
657 probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
657 probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
658 'include_fields': [],
658 'include_fields': [],
659 'permissive': True,
659 'permissive': True,
660 'token': self.bztoken,
660 'token': self.bztoken,
661 })
661 })
662 for badbug in probe['faults']:
662 for badbug in probe['faults']:
663 id = badbug['id']
663 id = badbug['id']
664 self.ui.status(_('bug %d does not exist\n') % id)
664 self.ui.status(_('bug %d does not exist\n') % id)
665 del bugs[id]
665 del bugs[id]
666
666
667 def filter_cset_known_bug_ids(self, node, bugs):
667 def filter_cset_known_bug_ids(self, node, bugs):
668 for id in sorted(bugs.keys()):
668 for id in sorted(bugs.keys()):
669 if self.get_bug_comments(id).find(short(node)) != -1:
669 if self.get_bug_comments(id).find(short(node)) != -1:
670 self.ui.status(_('bug %d already knows about changeset %s\n') %
670 self.ui.status(_('bug %d already knows about changeset %s\n') %
671 (id, short(node)))
671 (id, short(node)))
672 del bugs[id]
672 del bugs[id]
673
673
674 def updatebug(self, bugid, newstate, text, committer):
674 def updatebug(self, bugid, newstate, text, committer):
675 args = {}
675 args = {}
676 if 'hours' in newstate:
676 if 'hours' in newstate:
677 args['work_time'] = newstate['hours']
677 args['work_time'] = newstate['hours']
678
678
679 if self.bzvermajor >= 4:
679 if self.bzvermajor >= 4:
680 args['ids'] = [bugid]
680 args['ids'] = [bugid]
681 args['comment'] = {'body' : text}
681 args['comment'] = {'body' : text}
682 if 'fix' in newstate:
682 if 'fix' in newstate:
683 args['status'] = self.fixstatus
683 args['status'] = self.fixstatus
684 args['resolution'] = self.fixresolution
684 args['resolution'] = self.fixresolution
685 args['token'] = self.bztoken
685 args['token'] = self.bztoken
686 self.bzproxy.Bug.update(args)
686 self.bzproxy.Bug.update(args)
687 else:
687 else:
688 if 'fix' in newstate:
688 if 'fix' in newstate:
689 self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
689 self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
690 "to mark bugs fixed\n"))
690 "to mark bugs fixed\n"))
691 args['id'] = bugid
691 args['id'] = bugid
692 args['comment'] = text
692 args['comment'] = text
693 self.bzproxy.Bug.add_comment(args)
693 self.bzproxy.Bug.add_comment(args)
694
694
695 class bzxmlrpcemail(bzxmlrpc):
695 class bzxmlrpcemail(bzxmlrpc):
696 """Read data from Bugzilla via XMLRPC, send updates via email.
696 """Read data from Bugzilla via XMLRPC, send updates via email.
697
697
698 Advantages of sending updates via email:
698 Advantages of sending updates via email:
699 1. Comments can be added as any user, not just logged in user.
699 1. Comments can be added as any user, not just logged in user.
700 2. Bug statuses or other fields not accessible via XMLRPC can
700 2. Bug statuses or other fields not accessible via XMLRPC can
701 potentially be updated.
701 potentially be updated.
702
702
703 There is no XMLRPC function to change bug status before Bugzilla
703 There is no XMLRPC function to change bug status before Bugzilla
704 4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
704 4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
705 But bugs can be marked fixed via email from 3.4 onwards.
705 But bugs can be marked fixed via email from 3.4 onwards.
706 """
706 """
707
707
708 # The email interface changes subtly between 3.4 and 3.6. In 3.4,
708 # The email interface changes subtly between 3.4 and 3.6. In 3.4,
709 # in-email fields are specified as '@<fieldname> = <value>'. In
709 # in-email fields are specified as '@<fieldname> = <value>'. In
710 # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
710 # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
711 # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
711 # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
712 # compatibility, but rather than rely on this use the new format for
712 # compatibility, but rather than rely on this use the new format for
713 # 4.0 onwards.
713 # 4.0 onwards.
714
714
715 def __init__(self, ui):
715 def __init__(self, ui):
716 bzxmlrpc.__init__(self, ui)
716 bzxmlrpc.__init__(self, ui)
717
717
718 self.bzemail = self.ui.config('bugzilla', 'bzemail')
718 self.bzemail = self.ui.config('bugzilla', 'bzemail')
719 if not self.bzemail:
719 if not self.bzemail:
720 raise error.Abort(_("configuration 'bzemail' missing"))
720 raise error.Abort(_("configuration 'bzemail' missing"))
721 mail.validateconfig(self.ui)
721 mail.validateconfig(self.ui)
722
722
723 def makecommandline(self, fieldname, value):
723 def makecommandline(self, fieldname, value):
724 if self.bzvermajor >= 4:
724 if self.bzvermajor >= 4:
725 return "@%s %s" % (fieldname, str(value))
725 return "@%s %s" % (fieldname, str(value))
726 else:
726 else:
727 if fieldname == "id":
727 if fieldname == "id":
728 fieldname = "bug_id"
728 fieldname = "bug_id"
729 return "@%s = %s" % (fieldname, str(value))
729 return "@%s = %s" % (fieldname, str(value))
730
730
731 def send_bug_modify_email(self, bugid, commands, comment, committer):
731 def send_bug_modify_email(self, bugid, commands, comment, committer):
732 '''send modification message to Bugzilla bug via email.
732 '''send modification message to Bugzilla bug via email.
733
733
734 The message format is documented in the Bugzilla email_in.pl
734 The message format is documented in the Bugzilla email_in.pl
735 specification. commands is a list of command lines, comment is the
735 specification. commands is a list of command lines, comment is the
736 comment text.
736 comment text.
737
737
738 To stop users from crafting commit comments with
738 To stop users from crafting commit comments with
739 Bugzilla commands, specify the bug ID via the message body, rather
739 Bugzilla commands, specify the bug ID via the message body, rather
740 than the subject line, and leave a blank line after it.
740 than the subject line, and leave a blank line after it.
741 '''
741 '''
742 user = self.map_committer(committer)
742 user = self.map_committer(committer)
743 matches = self.bzproxy.User.get({'match': [user],
743 matches = self.bzproxy.User.get({'match': [user],
744 'token': self.bztoken})
744 'token': self.bztoken})
745 if not matches['users']:
745 if not matches['users']:
746 user = self.ui.config('bugzilla', 'user', 'bugs')
746 user = self.ui.config('bugzilla', 'user', 'bugs')
747 matches = self.bzproxy.User.get({'match': [user],
747 matches = self.bzproxy.User.get({'match': [user],
748 'token': self.bztoken})
748 'token': self.bztoken})
749 if not matches['users']:
749 if not matches['users']:
750 raise error.Abort(_("default bugzilla user %s email not found")
750 raise error.Abort(_("default bugzilla user %s email not found")
751 % user)
751 % user)
752 user = matches['users'][0]['email']
752 user = matches['users'][0]['email']
753 commands.append(self.makecommandline("id", bugid))
753 commands.append(self.makecommandline("id", bugid))
754
754
755 text = "\n".join(commands) + "\n\n" + comment
755 text = "\n".join(commands) + "\n\n" + comment
756
756
757 _charsets = mail._charsets(self.ui)
757 _charsets = mail._charsets(self.ui)
758 user = mail.addressencode(self.ui, user, _charsets)
758 user = mail.addressencode(self.ui, user, _charsets)
759 bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
759 bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
760 msg = mail.mimeencode(self.ui, text, _charsets)
760 msg = mail.mimeencode(self.ui, text, _charsets)
761 msg['From'] = user
761 msg['From'] = user
762 msg['To'] = bzemail
762 msg['To'] = bzemail
763 msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
763 msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
764 sendmail = mail.connect(self.ui)
764 sendmail = mail.connect(self.ui)
765 sendmail(user, bzemail, msg.as_string())
765 sendmail(user, bzemail, msg.as_string())
766
766
767 def updatebug(self, bugid, newstate, text, committer):
767 def updatebug(self, bugid, newstate, text, committer):
768 cmds = []
768 cmds = []
769 if 'hours' in newstate:
769 if 'hours' in newstate:
770 cmds.append(self.makecommandline("work_time", newstate['hours']))
770 cmds.append(self.makecommandline("work_time", newstate['hours']))
771 if 'fix' in newstate:
771 if 'fix' in newstate:
772 cmds.append(self.makecommandline("bug_status", self.fixstatus))
772 cmds.append(self.makecommandline("bug_status", self.fixstatus))
773 cmds.append(self.makecommandline("resolution", self.fixresolution))
773 cmds.append(self.makecommandline("resolution", self.fixresolution))
774 self.send_bug_modify_email(bugid, cmds, text, committer)
774 self.send_bug_modify_email(bugid, cmds, text, committer)
775
775
776 class bugzilla(object):
776 class bugzilla(object):
777 # supported versions of bugzilla. different versions have
777 # supported versions of bugzilla. different versions have
778 # different schemas.
778 # different schemas.
779 _versions = {
779 _versions = {
780 '2.16': bzmysql,
780 '2.16': bzmysql,
781 '2.18': bzmysql_2_18,
781 '2.18': bzmysql_2_18,
782 '3.0': bzmysql_3_0,
782 '3.0': bzmysql_3_0,
783 'xmlrpc': bzxmlrpc,
783 'xmlrpc': bzxmlrpc,
784 'xmlrpc+email': bzxmlrpcemail
784 'xmlrpc+email': bzxmlrpcemail
785 }
785 }
786
786
787 _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
787 _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
788 r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
788 r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
789 r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
789 r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
790
790
791 _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
791 _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
792 r'(?:nos?\.?|num(?:ber)?s?)?\s*'
792 r'(?:nos?\.?|num(?:ber)?s?)?\s*'
793 r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
793 r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
794 r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
794 r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
795
795
796 def __init__(self, ui, repo):
796 def __init__(self, ui, repo):
797 self.ui = ui
797 self.ui = ui
798 self.repo = repo
798 self.repo = repo
799
799
800 bzversion = self.ui.config('bugzilla', 'version')
800 bzversion = self.ui.config('bugzilla', 'version')
801 try:
801 try:
802 bzclass = bugzilla._versions[bzversion]
802 bzclass = bugzilla._versions[bzversion]
803 except KeyError:
803 except KeyError:
804 raise error.Abort(_('bugzilla version %s not supported') %
804 raise error.Abort(_('bugzilla version %s not supported') %
805 bzversion)
805 bzversion)
806 self.bzdriver = bzclass(self.ui)
806 self.bzdriver = bzclass(self.ui)
807
807
808 self.bug_re = re.compile(
808 self.bug_re = re.compile(
809 self.ui.config('bugzilla', 'regexp',
809 self.ui.config('bugzilla', 'regexp',
810 bugzilla._default_bug_re), re.IGNORECASE)
810 bugzilla._default_bug_re), re.IGNORECASE)
811 self.fix_re = re.compile(
811 self.fix_re = re.compile(
812 self.ui.config('bugzilla', 'fixregexp',
812 self.ui.config('bugzilla', 'fixregexp',
813 bugzilla._default_fix_re), re.IGNORECASE)
813 bugzilla._default_fix_re), re.IGNORECASE)
814 self.split_re = re.compile(r'\D+')
814 self.split_re = re.compile(r'\D+')
815
815
816 def find_bugs(self, ctx):
816 def find_bugs(self, ctx):
817 '''return bugs dictionary created from commit comment.
817 '''return bugs dictionary created from commit comment.
818
818
819 Extract bug info from changeset comments. Filter out any that are
819 Extract bug info from changeset comments. Filter out any that are
820 not known to Bugzilla, and any that already have a reference to
820 not known to Bugzilla, and any that already have a reference to
821 the given changeset in their comments.
821 the given changeset in their comments.
822 '''
822 '''
823 start = 0
823 start = 0
824 hours = 0.0
824 hours = 0.0
825 bugs = {}
825 bugs = {}
826 bugmatch = self.bug_re.search(ctx.description(), start)
826 bugmatch = self.bug_re.search(ctx.description(), start)
827 fixmatch = self.fix_re.search(ctx.description(), start)
827 fixmatch = self.fix_re.search(ctx.description(), start)
828 while True:
828 while True:
829 bugattribs = {}
829 bugattribs = {}
830 if not bugmatch and not fixmatch:
830 if not bugmatch and not fixmatch:
831 break
831 break
832 if not bugmatch:
832 if not bugmatch:
833 m = fixmatch
833 m = fixmatch
834 elif not fixmatch:
834 elif not fixmatch:
835 m = bugmatch
835 m = bugmatch
836 else:
836 else:
837 if bugmatch.start() < fixmatch.start():
837 if bugmatch.start() < fixmatch.start():
838 m = bugmatch
838 m = bugmatch
839 else:
839 else:
840 m = fixmatch
840 m = fixmatch
841 start = m.end()
841 start = m.end()
842 if m is bugmatch:
842 if m is bugmatch:
843 bugmatch = self.bug_re.search(ctx.description(), start)
843 bugmatch = self.bug_re.search(ctx.description(), start)
844 if 'fix' in bugattribs:
844 if 'fix' in bugattribs:
845 del bugattribs['fix']
845 del bugattribs['fix']
846 else:
846 else:
847 fixmatch = self.fix_re.search(ctx.description(), start)
847 fixmatch = self.fix_re.search(ctx.description(), start)
848 bugattribs['fix'] = None
848 bugattribs['fix'] = None
849
849
850 try:
850 try:
851 ids = m.group('ids')
851 ids = m.group('ids')
852 except IndexError:
852 except IndexError:
853 ids = m.group(1)
853 ids = m.group(1)
854 try:
854 try:
855 hours = float(m.group('hours'))
855 hours = float(m.group('hours'))
856 bugattribs['hours'] = hours
856 bugattribs['hours'] = hours
857 except IndexError:
857 except IndexError:
858 pass
858 pass
859 except TypeError:
859 except TypeError:
860 pass
860 pass
861 except ValueError:
861 except ValueError:
862 self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
862 self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
863
863
864 for id in self.split_re.split(ids):
864 for id in self.split_re.split(ids):
865 if not id:
865 if not id:
866 continue
866 continue
867 bugs[int(id)] = bugattribs
867 bugs[int(id)] = bugattribs
868 if bugs:
868 if bugs:
869 self.bzdriver.filter_real_bug_ids(bugs)
869 self.bzdriver.filter_real_bug_ids(bugs)
870 if bugs:
870 if bugs:
871 self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
871 self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
872 return bugs
872 return bugs
873
873
874 def update(self, bugid, newstate, ctx):
874 def update(self, bugid, newstate, ctx):
875 '''update bugzilla bug with reference to changeset.'''
875 '''update bugzilla bug with reference to changeset.'''
876
876
877 def webroot(root):
877 def webroot(root):
878 '''strip leading prefix of repo root and turn into
878 '''strip leading prefix of repo root and turn into
879 url-safe path.'''
879 url-safe path.'''
880 count = int(self.ui.config('bugzilla', 'strip', 0))
880 count = int(self.ui.config('bugzilla', 'strip', 0))
881 root = util.pconvert(root)
881 root = util.pconvert(root)
882 while count > 0:
882 while count > 0:
883 c = root.find('/')
883 c = root.find('/')
884 if c == -1:
884 if c == -1:
885 break
885 break
886 root = root[c + 1:]
886 root = root[c + 1:]
887 count -= 1
887 count -= 1
888 return root
888 return root
889
889
890 mapfile = None
890 mapfile = None
891 tmpl = self.ui.config('bugzilla', 'template')
891 tmpl = self.ui.config('bugzilla', 'template')
892 if not tmpl:
892 if not tmpl:
893 mapfile = self.ui.config('bugzilla', 'style')
893 mapfile = self.ui.config('bugzilla', 'style')
894 if not mapfile and not tmpl:
894 if not mapfile and not tmpl:
895 tmpl = _('changeset {node|short} in repo {root} refers '
895 tmpl = _('changeset {node|short} in repo {root} refers '
896 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
896 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
897 t = cmdutil.changeset_templater(self.ui, self.repo,
897 t = cmdutil.changeset_templater(self.ui, self.repo,
898 False, None, tmpl, mapfile, False)
898 False, None, tmpl, mapfile, False)
899 self.ui.pushbuffer()
899 self.ui.pushbuffer()
900 t.show(ctx, changes=ctx.changeset(),
900 t.show(ctx, changes=ctx.changeset(),
901 bug=str(bugid),
901 bug=str(bugid),
902 hgweb=self.ui.config('web', 'baseurl'),
902 hgweb=self.ui.config('web', 'baseurl'),
903 root=self.repo.root,
903 root=self.repo.root,
904 webroot=webroot(self.repo.root))
904 webroot=webroot(self.repo.root))
905 data = self.ui.popbuffer()
905 data = self.ui.popbuffer()
906 self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))
906 self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))
907
907
908 def notify(self, bugs, committer):
908 def notify(self, bugs, committer):
909 '''ensure Bugzilla users are notified of bug change.'''
909 '''ensure Bugzilla users are notified of bug change.'''
910 self.bzdriver.notify(bugs, committer)
910 self.bzdriver.notify(bugs, committer)
911
911
912 def hook(ui, repo, hooktype, node=None, **kwargs):
912 def hook(ui, repo, hooktype, node=None, **kwargs):
913 '''add comment to bugzilla for each changeset that refers to a
913 '''add comment to bugzilla for each changeset that refers to a
914 bugzilla bug id. only add a comment once per bug, so same change
914 bugzilla bug id. only add a comment once per bug, so same change
915 seen multiple times does not fill bug with duplicate data.'''
915 seen multiple times does not fill bug with duplicate data.'''
916 if node is None:
916 if node is None:
917 raise error.Abort(_('hook type %s does not pass a changeset id') %
917 raise error.Abort(_('hook type %s does not pass a changeset id') %
918 hooktype)
918 hooktype)
919 try:
919 try:
920 bz = bugzilla(ui, repo)
920 bz = bugzilla(ui, repo)
921 ctx = repo[node]
921 ctx = repo[node]
922 bugs = bz.find_bugs(ctx)
922 bugs = bz.find_bugs(ctx)
923 if bugs:
923 if bugs:
924 for bug in bugs:
924 for bug in bugs:
925 bz.update(bug, bugs[bug], ctx)
925 bz.update(bug, bugs[bug], ctx)
926 bz.notify(bugs, util.email(ctx.user()))
926 bz.notify(bugs, util.email(ctx.user()))
927 except Exception as e:
927 except Exception as e:
928 raise error.Abort(_('Bugzilla error: %s') % e)
928 raise error.Abort(_('Bugzilla error: %s') % e)
@@ -1,190 +1,190 b''
1 # Copyright (C) 2015 - Mike Edgar <adgar@google.com>
1 # Copyright (C) 2015 - Mike Edgar <adgar@google.com>
2 #
2 #
3 # This extension enables removal of file content at a given revision,
3 # This extension enables removal of file content at a given revision,
4 # rewriting the data/metadata of successive revisions to preserve revision log
4 # rewriting the data/metadata of successive revisions to preserve revision log
5 # integrity.
5 # integrity.
6
6
7 """erase file content at a given revision
7 """erase file content at a given revision
8
8
9 The censor command instructs Mercurial to erase all content of a file at a given
9 The censor command instructs Mercurial to erase all content of a file at a given
10 revision *without updating the changeset hash.* This allows existing history to
10 revision *without updating the changeset hash.* This allows existing history to
11 remain valid while preventing future clones/pulls from receiving the erased
11 remain valid while preventing future clones/pulls from receiving the erased
12 data.
12 data.
13
13
14 Typical uses for censor are due to security or legal requirements, including::
14 Typical uses for censor are due to security or legal requirements, including::
15
15
16 * Passwords, private keys, cryptographic material
16 * Passwords, private keys, cryptographic material
17 * Licensed data/code/libraries for which the license has expired
17 * Licensed data/code/libraries for which the license has expired
18 * Personally Identifiable Information or other private data
18 * Personally Identifiable Information or other private data
19
19
20 Censored nodes can interrupt mercurial's typical operation whenever the excised
20 Censored nodes can interrupt mercurial's typical operation whenever the excised
21 data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
21 data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
22 simply fail when asked to produce censored data. Others, like ``hg verify`` and
22 simply fail when asked to produce censored data. Others, like ``hg verify`` and
23 ``hg update``, must be capable of tolerating censored data to continue to
23 ``hg update``, must be capable of tolerating censored data to continue to
24 function in a meaningful way. Such commands only tolerate censored file
24 function in a meaningful way. Such commands only tolerate censored file
25 revisions if they are allowed by the "censor.policy=ignore" config option.
25 revisions if they are allowed by the "censor.policy=ignore" config option.
26 """
26 """
27
27
28 from __future__ import absolute_import
28 from __future__ import absolute_import
29
29
30 from mercurial.i18n import _
30 from mercurial.i18n import _
31 from mercurial.node import short
31 from mercurial.node import short
32
32
33 from mercurial import (
33 from mercurial import (
34 cmdutil,
34 cmdutil,
35 error,
35 error,
36 filelog,
36 filelog,
37 lock as lockmod,
37 lock as lockmod,
38 revlog,
38 revlog,
39 scmutil,
39 scmutil,
40 util,
40 util,
41 )
41 )
42
42
43 cmdtable = {}
43 cmdtable = {}
44 command = cmdutil.command(cmdtable)
44 command = cmdutil.command(cmdtable)
45 # Note for extension authors: ONLY specify testedwith = 'internal' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # be specifying the version(s) of Mercurial they are tested with, or
47 # be specifying the version(s) of Mercurial they are tested with, or
48 # leave the attribute unspecified.
48 # leave the attribute unspecified.
49 testedwith = 'internal'
49 testedwith = 'ships-with-hg-core'
50
50
51 @command('censor',
51 @command('censor',
52 [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
52 [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
53 ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
53 ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
54 _('-r REV [-t TEXT] [FILE]'))
54 _('-r REV [-t TEXT] [FILE]'))
55 def censor(ui, repo, path, rev='', tombstone='', **opts):
55 def censor(ui, repo, path, rev='', tombstone='', **opts):
56 wlock = lock = None
56 wlock = lock = None
57 try:
57 try:
58 wlock = repo.wlock()
58 wlock = repo.wlock()
59 lock = repo.lock()
59 lock = repo.lock()
60 return _docensor(ui, repo, path, rev, tombstone, **opts)
60 return _docensor(ui, repo, path, rev, tombstone, **opts)
61 finally:
61 finally:
62 lockmod.release(lock, wlock)
62 lockmod.release(lock, wlock)
63
63
64 def _docensor(ui, repo, path, rev='', tombstone='', **opts):
64 def _docensor(ui, repo, path, rev='', tombstone='', **opts):
65 if not path:
65 if not path:
66 raise error.Abort(_('must specify file path to censor'))
66 raise error.Abort(_('must specify file path to censor'))
67 if not rev:
67 if not rev:
68 raise error.Abort(_('must specify revision to censor'))
68 raise error.Abort(_('must specify revision to censor'))
69
69
70 wctx = repo[None]
70 wctx = repo[None]
71
71
72 m = scmutil.match(wctx, (path,))
72 m = scmutil.match(wctx, (path,))
73 if m.anypats() or len(m.files()) != 1:
73 if m.anypats() or len(m.files()) != 1:
74 raise error.Abort(_('can only specify an explicit filename'))
74 raise error.Abort(_('can only specify an explicit filename'))
75 path = m.files()[0]
75 path = m.files()[0]
76 flog = repo.file(path)
76 flog = repo.file(path)
77 if not len(flog):
77 if not len(flog):
78 raise error.Abort(_('cannot censor file with no history'))
78 raise error.Abort(_('cannot censor file with no history'))
79
79
80 rev = scmutil.revsingle(repo, rev, rev).rev()
80 rev = scmutil.revsingle(repo, rev, rev).rev()
81 try:
81 try:
82 ctx = repo[rev]
82 ctx = repo[rev]
83 except KeyError:
83 except KeyError:
84 raise error.Abort(_('invalid revision identifier %s') % rev)
84 raise error.Abort(_('invalid revision identifier %s') % rev)
85
85
86 try:
86 try:
87 fctx = ctx.filectx(path)
87 fctx = ctx.filectx(path)
88 except error.LookupError:
88 except error.LookupError:
89 raise error.Abort(_('file does not exist at revision %s') % rev)
89 raise error.Abort(_('file does not exist at revision %s') % rev)
90
90
91 fnode = fctx.filenode()
91 fnode = fctx.filenode()
92 headctxs = [repo[c] for c in repo.heads()]
92 headctxs = [repo[c] for c in repo.heads()]
93 heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
93 heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
94 if heads:
94 if heads:
95 headlist = ', '.join([short(c.node()) for c in heads])
95 headlist = ', '.join([short(c.node()) for c in heads])
96 raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
96 raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
97 hint=_('clean/delete and commit first'))
97 hint=_('clean/delete and commit first'))
98
98
99 wp = wctx.parents()
99 wp = wctx.parents()
100 if ctx.node() in [p.node() for p in wp]:
100 if ctx.node() in [p.node() for p in wp]:
101 raise error.Abort(_('cannot censor working directory'),
101 raise error.Abort(_('cannot censor working directory'),
102 hint=_('clean/delete/update first'))
102 hint=_('clean/delete/update first'))
103
103
104 flogv = flog.version & 0xFFFF
104 flogv = flog.version & 0xFFFF
105 if flogv != revlog.REVLOGNG:
105 if flogv != revlog.REVLOGNG:
106 raise error.Abort(
106 raise error.Abort(
107 _('censor does not support revlog version %d') % (flogv,))
107 _('censor does not support revlog version %d') % (flogv,))
108
108
109 tombstone = filelog.packmeta({"censored": tombstone}, "")
109 tombstone = filelog.packmeta({"censored": tombstone}, "")
110
110
111 crev = fctx.filerev()
111 crev = fctx.filerev()
112
112
113 if len(tombstone) > flog.rawsize(crev):
113 if len(tombstone) > flog.rawsize(crev):
114 raise error.Abort(_(
114 raise error.Abort(_(
115 'censor tombstone must be no longer than censored data'))
115 'censor tombstone must be no longer than censored data'))
116
116
117 # Using two files instead of one makes it easy to rewrite entry-by-entry
117 # Using two files instead of one makes it easy to rewrite entry-by-entry
118 idxread = repo.svfs(flog.indexfile, 'r')
118 idxread = repo.svfs(flog.indexfile, 'r')
119 idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
119 idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
120 if flog.version & revlog.REVLOGNGINLINEDATA:
120 if flog.version & revlog.REVLOGNGINLINEDATA:
121 dataread, datawrite = idxread, idxwrite
121 dataread, datawrite = idxread, idxwrite
122 else:
122 else:
123 dataread = repo.svfs(flog.datafile, 'r')
123 dataread = repo.svfs(flog.datafile, 'r')
124 datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)
124 datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)
125
125
126 # Copy all revlog data up to the entry to be censored.
126 # Copy all revlog data up to the entry to be censored.
127 rio = revlog.revlogio()
127 rio = revlog.revlogio()
128 offset = flog.start(crev)
128 offset = flog.start(crev)
129
129
130 for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
130 for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
131 idxwrite.write(chunk)
131 idxwrite.write(chunk)
132 for chunk in util.filechunkiter(dataread, limit=offset):
132 for chunk in util.filechunkiter(dataread, limit=offset):
133 datawrite.write(chunk)
133 datawrite.write(chunk)
134
134
135 def rewriteindex(r, newoffs, newdata=None):
135 def rewriteindex(r, newoffs, newdata=None):
136 """Rewrite the index entry with a new data offset and optional new data.
136 """Rewrite the index entry with a new data offset and optional new data.
137
137
138 The newdata argument, if given, is a tuple of three positive integers:
138 The newdata argument, if given, is a tuple of three positive integers:
139 (new compressed, new uncompressed, added flag bits).
139 (new compressed, new uncompressed, added flag bits).
140 """
140 """
141 offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
141 offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
142 flags = revlog.gettype(offlags)
142 flags = revlog.gettype(offlags)
143 if newdata:
143 if newdata:
144 comp, uncomp, nflags = newdata
144 comp, uncomp, nflags = newdata
145 flags |= nflags
145 flags |= nflags
146 offlags = revlog.offset_type(newoffs, flags)
146 offlags = revlog.offset_type(newoffs, flags)
147 e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
147 e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
148 idxwrite.write(rio.packentry(e, None, flog.version, r))
148 idxwrite.write(rio.packentry(e, None, flog.version, r))
149 idxread.seek(rio.size, 1)
149 idxread.seek(rio.size, 1)
150
150
151 def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
151 def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
152 """Write the given full text to the filelog with the given data offset.
152 """Write the given full text to the filelog with the given data offset.
153
153
154 Returns:
154 Returns:
155 The integer number of data bytes written, for tracking data offsets.
155 The integer number of data bytes written, for tracking data offsets.
156 """
156 """
157 flag, compdata = flog.compress(data)
157 flag, compdata = flog.compress(data)
158 newcomp = len(flag) + len(compdata)
158 newcomp = len(flag) + len(compdata)
159 rewriteindex(r, offs, (newcomp, len(data), nflags))
159 rewriteindex(r, offs, (newcomp, len(data), nflags))
160 datawrite.write(flag)
160 datawrite.write(flag)
161 datawrite.write(compdata)
161 datawrite.write(compdata)
162 dataread.seek(flog.length(r), 1)
162 dataread.seek(flog.length(r), 1)
163 return newcomp
163 return newcomp
164
164
165 # Rewrite censored revlog entry with (padded) tombstone data.
165 # Rewrite censored revlog entry with (padded) tombstone data.
166 pad = ' ' * (flog.rawsize(crev) - len(tombstone))
166 pad = ' ' * (flog.rawsize(crev) - len(tombstone))
167 offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
167 offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
168
168
169 # Rewrite all following filelog revisions fixing up offsets and deltas.
169 # Rewrite all following filelog revisions fixing up offsets and deltas.
170 for srev in xrange(crev + 1, len(flog)):
170 for srev in xrange(crev + 1, len(flog)):
171 if crev in flog.parentrevs(srev):
171 if crev in flog.parentrevs(srev):
172 # Immediate children of censored node must be re-added as fulltext.
172 # Immediate children of censored node must be re-added as fulltext.
173 try:
173 try:
174 revdata = flog.revision(srev)
174 revdata = flog.revision(srev)
175 except error.CensoredNodeError as e:
175 except error.CensoredNodeError as e:
176 revdata = e.tombstone
176 revdata = e.tombstone
177 dlen = rewrite(srev, offset, revdata)
177 dlen = rewrite(srev, offset, revdata)
178 else:
178 else:
179 # Copy any other revision data verbatim after fixing up the offset.
179 # Copy any other revision data verbatim after fixing up the offset.
180 rewriteindex(srev, offset)
180 rewriteindex(srev, offset)
181 dlen = flog.length(srev)
181 dlen = flog.length(srev)
182 for chunk in util.filechunkiter(dataread, limit=dlen):
182 for chunk in util.filechunkiter(dataread, limit=dlen):
183 datawrite.write(chunk)
183 datawrite.write(chunk)
184 offset += dlen
184 offset += dlen
185
185
186 idxread.close()
186 idxread.close()
187 idxwrite.close()
187 idxwrite.close()
188 if dataread is not idxread:
188 if dataread is not idxread:
189 dataread.close()
189 dataread.close()
190 datawrite.close()
190 datawrite.close()
@@ -1,643 +1,643 b''
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg (EXPERIMENTAL)
8 """command server extension for cHg (EXPERIMENTAL)
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'getpager' command
19 'getpager' command
20 checks if pager is enabled and which pager should be executed
20 checks if pager is enabled and which pager should be executed
21
21
22 'setenv' command
22 'setenv' command
23 replace os.environ completely
23 replace os.environ completely
24
24
25 'setumask' command
25 'setumask' command
26 set umask
26 set umask
27
27
28 'validate' command
28 'validate' command
29 reload the config and check if the server is up to date
29 reload the config and check if the server is up to date
30
30
31 Config
31 Config
32 ------
32 ------
33
33
34 ::
34 ::
35
35
36 [chgserver]
36 [chgserver]
37 idletimeout = 3600 # seconds, after which an idle server will exit
37 idletimeout = 3600 # seconds, after which an idle server will exit
38 skiphash = False # whether to skip config or env change checks
38 skiphash = False # whether to skip config or env change checks
39 """
39 """
40
40
41 from __future__ import absolute_import
41 from __future__ import absolute_import
42
42
43 import errno
43 import errno
44 import hashlib
44 import hashlib
45 import inspect
45 import inspect
46 import os
46 import os
47 import re
47 import re
48 import signal
48 import signal
49 import struct
49 import struct
50 import sys
50 import sys
51 import time
51 import time
52
52
53 from mercurial.i18n import _
53 from mercurial.i18n import _
54
54
55 from mercurial import (
55 from mercurial import (
56 cmdutil,
56 cmdutil,
57 commands,
57 commands,
58 commandserver,
58 commandserver,
59 dispatch,
59 dispatch,
60 error,
60 error,
61 extensions,
61 extensions,
62 osutil,
62 osutil,
63 util,
63 util,
64 )
64 )
65
65
66 # Note for extension authors: ONLY specify testedwith = 'internal' for
66 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
67 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
67 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
68 # be specifying the version(s) of Mercurial they are tested with, or
68 # be specifying the version(s) of Mercurial they are tested with, or
69 # leave the attribute unspecified.
69 # leave the attribute unspecified.
70 testedwith = 'internal'
70 testedwith = 'ships-with-hg-core'
71
71
72 _log = commandserver.log
72 _log = commandserver.log
73
73
74 def _hashlist(items):
74 def _hashlist(items):
75 """return sha1 hexdigest for a list"""
75 """return sha1 hexdigest for a list"""
76 return hashlib.sha1(str(items)).hexdigest()
76 return hashlib.sha1(str(items)).hexdigest()
77
77
78 # sensitive config sections affecting confighash
78 # sensitive config sections affecting confighash
79 _configsections = [
79 _configsections = [
80 'alias', # affects global state commands.table
80 'alias', # affects global state commands.table
81 'extdiff', # uisetup will register new commands
81 'extdiff', # uisetup will register new commands
82 'extensions',
82 'extensions',
83 ]
83 ]
84
84
85 # sensitive environment variables affecting confighash
85 # sensitive environment variables affecting confighash
86 _envre = re.compile(r'''\A(?:
86 _envre = re.compile(r'''\A(?:
87 CHGHG
87 CHGHG
88 |HG.*
88 |HG.*
89 |LANG(?:UAGE)?
89 |LANG(?:UAGE)?
90 |LC_.*
90 |LC_.*
91 |LD_.*
91 |LD_.*
92 |PATH
92 |PATH
93 |PYTHON.*
93 |PYTHON.*
94 |TERM(?:INFO)?
94 |TERM(?:INFO)?
95 |TZ
95 |TZ
96 )\Z''', re.X)
96 )\Z''', re.X)
97
97
98 def _confighash(ui):
98 def _confighash(ui):
99 """return a quick hash for detecting config/env changes
99 """return a quick hash for detecting config/env changes
100
100
101 confighash is the hash of sensitive config items and environment variables.
101 confighash is the hash of sensitive config items and environment variables.
102
102
103 for chgserver, it is designed that once confighash changes, the server is
103 for chgserver, it is designed that once confighash changes, the server is
104 not qualified to serve its client and should redirect the client to a new
104 not qualified to serve its client and should redirect the client to a new
105 server. different from mtimehash, confighash change will not mark the
105 server. different from mtimehash, confighash change will not mark the
106 server outdated and exit since the user can have different configs at the
106 server outdated and exit since the user can have different configs at the
107 same time.
107 same time.
108 """
108 """
109 sectionitems = []
109 sectionitems = []
110 for section in _configsections:
110 for section in _configsections:
111 sectionitems.append(ui.configitems(section))
111 sectionitems.append(ui.configitems(section))
112 sectionhash = _hashlist(sectionitems)
112 sectionhash = _hashlist(sectionitems)
113 envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
113 envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
114 envhash = _hashlist(sorted(envitems))
114 envhash = _hashlist(sorted(envitems))
115 return sectionhash[:6] + envhash[:6]
115 return sectionhash[:6] + envhash[:6]
116
116
117 def _getmtimepaths(ui):
117 def _getmtimepaths(ui):
118 """get a list of paths that should be checked to detect change
118 """get a list of paths that should be checked to detect change
119
119
120 The list will include:
120 The list will include:
121 - extensions (will not cover all files for complex extensions)
121 - extensions (will not cover all files for complex extensions)
122 - mercurial/__version__.py
122 - mercurial/__version__.py
123 - python binary
123 - python binary
124 """
124 """
125 modules = [m for n, m in extensions.extensions(ui)]
125 modules = [m for n, m in extensions.extensions(ui)]
126 try:
126 try:
127 from mercurial import __version__
127 from mercurial import __version__
128 modules.append(__version__)
128 modules.append(__version__)
129 except ImportError:
129 except ImportError:
130 pass
130 pass
131 files = [sys.executable]
131 files = [sys.executable]
132 for m in modules:
132 for m in modules:
133 try:
133 try:
134 files.append(inspect.getabsfile(m))
134 files.append(inspect.getabsfile(m))
135 except TypeError:
135 except TypeError:
136 pass
136 pass
137 return sorted(set(files))
137 return sorted(set(files))
138
138
139 def _mtimehash(paths):
139 def _mtimehash(paths):
140 """return a quick hash for detecting file changes
140 """return a quick hash for detecting file changes
141
141
142 mtimehash calls stat on given paths and calculate a hash based on size and
142 mtimehash calls stat on given paths and calculate a hash based on size and
143 mtime of each file. mtimehash does not read file content because reading is
143 mtime of each file. mtimehash does not read file content because reading is
144 expensive. therefore it's not 100% reliable for detecting content changes.
144 expensive. therefore it's not 100% reliable for detecting content changes.
145 it's possible to return different hashes for same file contents.
145 it's possible to return different hashes for same file contents.
146 it's also possible to return a same hash for different file contents for
146 it's also possible to return a same hash for different file contents for
147 some carefully crafted situation.
147 some carefully crafted situation.
148
148
149 for chgserver, it is designed that once mtimehash changes, the server is
149 for chgserver, it is designed that once mtimehash changes, the server is
150 considered outdated immediately and should no longer provide service.
150 considered outdated immediately and should no longer provide service.
151
151
152 mtimehash is not included in confighash because we only know the paths of
152 mtimehash is not included in confighash because we only know the paths of
153 extensions after importing them (there is imp.find_module but that faces
153 extensions after importing them (there is imp.find_module but that faces
154 race conditions). We need to calculate confighash without importing.
154 race conditions). We need to calculate confighash without importing.
155 """
155 """
156 def trystat(path):
156 def trystat(path):
157 try:
157 try:
158 st = os.stat(path)
158 st = os.stat(path)
159 return (st.st_mtime, st.st_size)
159 return (st.st_mtime, st.st_size)
160 except OSError:
160 except OSError:
161 # could be ENOENT, EPERM etc. not fatal in any case
161 # could be ENOENT, EPERM etc. not fatal in any case
162 pass
162 pass
163 return _hashlist(map(trystat, paths))[:12]
163 return _hashlist(map(trystat, paths))[:12]
164
164
165 class hashstate(object):
165 class hashstate(object):
166 """a structure storing confighash, mtimehash, paths used for mtimehash"""
166 """a structure storing confighash, mtimehash, paths used for mtimehash"""
167 def __init__(self, confighash, mtimehash, mtimepaths):
167 def __init__(self, confighash, mtimehash, mtimepaths):
168 self.confighash = confighash
168 self.confighash = confighash
169 self.mtimehash = mtimehash
169 self.mtimehash = mtimehash
170 self.mtimepaths = mtimepaths
170 self.mtimepaths = mtimepaths
171
171
172 @staticmethod
172 @staticmethod
173 def fromui(ui, mtimepaths=None):
173 def fromui(ui, mtimepaths=None):
174 if mtimepaths is None:
174 if mtimepaths is None:
175 mtimepaths = _getmtimepaths(ui)
175 mtimepaths = _getmtimepaths(ui)
176 confighash = _confighash(ui)
176 confighash = _confighash(ui)
177 mtimehash = _mtimehash(mtimepaths)
177 mtimehash = _mtimehash(mtimepaths)
178 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
178 _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
179 return hashstate(confighash, mtimehash, mtimepaths)
179 return hashstate(confighash, mtimehash, mtimepaths)
180
180
181 # copied from hgext/pager.py:uisetup()
181 # copied from hgext/pager.py:uisetup()
182 def _setuppagercmd(ui, options, cmd):
182 def _setuppagercmd(ui, options, cmd):
183 if not ui.formatted():
183 if not ui.formatted():
184 return
184 return
185
185
186 p = ui.config("pager", "pager", os.environ.get("PAGER"))
186 p = ui.config("pager", "pager", os.environ.get("PAGER"))
187 usepager = False
187 usepager = False
188 always = util.parsebool(options['pager'])
188 always = util.parsebool(options['pager'])
189 auto = options['pager'] == 'auto'
189 auto = options['pager'] == 'auto'
190
190
191 if not p:
191 if not p:
192 pass
192 pass
193 elif always:
193 elif always:
194 usepager = True
194 usepager = True
195 elif not auto:
195 elif not auto:
196 usepager = False
196 usepager = False
197 else:
197 else:
198 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
198 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
199 attend = ui.configlist('pager', 'attend', attended)
199 attend = ui.configlist('pager', 'attend', attended)
200 ignore = ui.configlist('pager', 'ignore')
200 ignore = ui.configlist('pager', 'ignore')
201 cmds, _ = cmdutil.findcmd(cmd, commands.table)
201 cmds, _ = cmdutil.findcmd(cmd, commands.table)
202
202
203 for cmd in cmds:
203 for cmd in cmds:
204 var = 'attend-%s' % cmd
204 var = 'attend-%s' % cmd
205 if ui.config('pager', var):
205 if ui.config('pager', var):
206 usepager = ui.configbool('pager', var)
206 usepager = ui.configbool('pager', var)
207 break
207 break
208 if (cmd in attend or
208 if (cmd in attend or
209 (cmd not in ignore and not attend)):
209 (cmd not in ignore and not attend)):
210 usepager = True
210 usepager = True
211 break
211 break
212
212
213 if usepager:
213 if usepager:
214 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
214 ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
215 ui.setconfig('ui', 'interactive', False, 'pager')
215 ui.setconfig('ui', 'interactive', False, 'pager')
216 return p
216 return p
217
217
218 def _newchgui(srcui, csystem):
218 def _newchgui(srcui, csystem):
219 class chgui(srcui.__class__):
219 class chgui(srcui.__class__):
220 def __init__(self, src=None):
220 def __init__(self, src=None):
221 super(chgui, self).__init__(src)
221 super(chgui, self).__init__(src)
222 if src:
222 if src:
223 self._csystem = getattr(src, '_csystem', csystem)
223 self._csystem = getattr(src, '_csystem', csystem)
224 else:
224 else:
225 self._csystem = csystem
225 self._csystem = csystem
226
226
227 def system(self, cmd, environ=None, cwd=None, onerr=None,
227 def system(self, cmd, environ=None, cwd=None, onerr=None,
228 errprefix=None):
228 errprefix=None):
229 # fallback to the original system method if the output needs to be
229 # fallback to the original system method if the output needs to be
230 # captured (to self._buffers), or the output stream is not stdout
230 # captured (to self._buffers), or the output stream is not stdout
231 # (e.g. stderr, cStringIO), because the chg client is not aware of
231 # (e.g. stderr, cStringIO), because the chg client is not aware of
232 # these situations and will behave differently (write to stdout).
232 # these situations and will behave differently (write to stdout).
233 if (any(s[1] for s in self._bufferstates)
233 if (any(s[1] for s in self._bufferstates)
234 or not util.safehasattr(self.fout, 'fileno')
234 or not util.safehasattr(self.fout, 'fileno')
235 or self.fout.fileno() != sys.stdout.fileno()):
235 or self.fout.fileno() != sys.stdout.fileno()):
236 return super(chgui, self).system(cmd, environ, cwd, onerr,
236 return super(chgui, self).system(cmd, environ, cwd, onerr,
237 errprefix)
237 errprefix)
238 # copied from mercurial/util.py:system()
238 # copied from mercurial/util.py:system()
239 self.flush()
239 self.flush()
240 def py2shell(val):
240 def py2shell(val):
241 if val is None or val is False:
241 if val is None or val is False:
242 return '0'
242 return '0'
243 if val is True:
243 if val is True:
244 return '1'
244 return '1'
245 return str(val)
245 return str(val)
246 env = os.environ.copy()
246 env = os.environ.copy()
247 if environ:
247 if environ:
248 env.update((k, py2shell(v)) for k, v in environ.iteritems())
248 env.update((k, py2shell(v)) for k, v in environ.iteritems())
249 env['HG'] = util.hgexecutable()
249 env['HG'] = util.hgexecutable()
250 rc = self._csystem(cmd, env, cwd)
250 rc = self._csystem(cmd, env, cwd)
251 if rc and onerr:
251 if rc and onerr:
252 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
252 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
253 util.explainexit(rc)[0])
253 util.explainexit(rc)[0])
254 if errprefix:
254 if errprefix:
255 errmsg = '%s: %s' % (errprefix, errmsg)
255 errmsg = '%s: %s' % (errprefix, errmsg)
256 raise onerr(errmsg)
256 raise onerr(errmsg)
257 return rc
257 return rc
258
258
259 return chgui(srcui)
259 return chgui(srcui)
260
260
261 def _loadnewui(srcui, args):
261 def _loadnewui(srcui, args):
262 newui = srcui.__class__()
262 newui = srcui.__class__()
263 for a in ['fin', 'fout', 'ferr', 'environ']:
263 for a in ['fin', 'fout', 'ferr', 'environ']:
264 setattr(newui, a, getattr(srcui, a))
264 setattr(newui, a, getattr(srcui, a))
265 if util.safehasattr(srcui, '_csystem'):
265 if util.safehasattr(srcui, '_csystem'):
266 newui._csystem = srcui._csystem
266 newui._csystem = srcui._csystem
267
267
268 # internal config: extensions.chgserver
268 # internal config: extensions.chgserver
269 newui.setconfig('extensions', 'chgserver',
269 newui.setconfig('extensions', 'chgserver',
270 srcui.config('extensions', 'chgserver'), '--config')
270 srcui.config('extensions', 'chgserver'), '--config')
271
271
272 # command line args
272 # command line args
273 args = args[:]
273 args = args[:]
274 dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
274 dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))
275
275
276 # stolen from tortoisehg.util.copydynamicconfig()
276 # stolen from tortoisehg.util.copydynamicconfig()
277 for section, name, value in srcui.walkconfig():
277 for section, name, value in srcui.walkconfig():
278 source = srcui.configsource(section, name)
278 source = srcui.configsource(section, name)
279 if ':' in source or source == '--config':
279 if ':' in source or source == '--config':
280 # path:line or command line
280 # path:line or command line
281 continue
281 continue
282 if source == 'none':
282 if source == 'none':
283 # ui.configsource returns 'none' by default
283 # ui.configsource returns 'none' by default
284 source = ''
284 source = ''
285 newui.setconfig(section, name, value, source)
285 newui.setconfig(section, name, value, source)
286
286
287 # load wd and repo config, copied from dispatch.py
287 # load wd and repo config, copied from dispatch.py
288 cwds = dispatch._earlygetopt(['--cwd'], args)
288 cwds = dispatch._earlygetopt(['--cwd'], args)
289 cwd = cwds and os.path.realpath(cwds[-1]) or None
289 cwd = cwds and os.path.realpath(cwds[-1]) or None
290 rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
290 rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
291 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
291 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
292
292
293 return (newui, newlui)
293 return (newui, newlui)
294
294
295 class channeledsystem(object):
295 class channeledsystem(object):
296 """Propagate ui.system() request in the following format:
296 """Propagate ui.system() request in the following format:
297
297
298 payload length (unsigned int),
298 payload length (unsigned int),
299 cmd, '\0',
299 cmd, '\0',
300 cwd, '\0',
300 cwd, '\0',
301 envkey, '=', val, '\0',
301 envkey, '=', val, '\0',
302 ...
302 ...
303 envkey, '=', val
303 envkey, '=', val
304
304
305 and waits:
305 and waits:
306
306
307 exitcode length (unsigned int),
307 exitcode length (unsigned int),
308 exitcode (int)
308 exitcode (int)
309 """
309 """
310 def __init__(self, in_, out, channel):
310 def __init__(self, in_, out, channel):
311 self.in_ = in_
311 self.in_ = in_
312 self.out = out
312 self.out = out
313 self.channel = channel
313 self.channel = channel
314
314
315 def __call__(self, cmd, environ, cwd):
315 def __call__(self, cmd, environ, cwd):
316 args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
316 args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
317 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
317 args.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
318 data = '\0'.join(args)
318 data = '\0'.join(args)
319 self.out.write(struct.pack('>cI', self.channel, len(data)))
319 self.out.write(struct.pack('>cI', self.channel, len(data)))
320 self.out.write(data)
320 self.out.write(data)
321 self.out.flush()
321 self.out.flush()
322
322
323 length = self.in_.read(4)
323 length = self.in_.read(4)
324 length, = struct.unpack('>I', length)
324 length, = struct.unpack('>I', length)
325 if length != 4:
325 if length != 4:
326 raise error.Abort(_('invalid response'))
326 raise error.Abort(_('invalid response'))
327 rc, = struct.unpack('>i', self.in_.read(4))
327 rc, = struct.unpack('>i', self.in_.read(4))
328 return rc
328 return rc
329
329
330 _iochannels = [
330 _iochannels = [
331 # server.ch, ui.fp, mode
331 # server.ch, ui.fp, mode
332 ('cin', 'fin', 'rb'),
332 ('cin', 'fin', 'rb'),
333 ('cout', 'fout', 'wb'),
333 ('cout', 'fout', 'wb'),
334 ('cerr', 'ferr', 'wb'),
334 ('cerr', 'ferr', 'wb'),
335 ]
335 ]
336
336
337 class chgcmdserver(commandserver.server):
337 class chgcmdserver(commandserver.server):
338 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
338 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
339 super(chgcmdserver, self).__init__(
339 super(chgcmdserver, self).__init__(
340 _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
340 _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
341 self.clientsock = sock
341 self.clientsock = sock
342 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
342 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
343 self.hashstate = hashstate
343 self.hashstate = hashstate
344 self.baseaddress = baseaddress
344 self.baseaddress = baseaddress
345 if hashstate is not None:
345 if hashstate is not None:
346 self.capabilities = self.capabilities.copy()
346 self.capabilities = self.capabilities.copy()
347 self.capabilities['validate'] = chgcmdserver.validate
347 self.capabilities['validate'] = chgcmdserver.validate
348
348
349 def cleanup(self):
349 def cleanup(self):
350 super(chgcmdserver, self).cleanup()
350 super(chgcmdserver, self).cleanup()
351 # dispatch._runcatch() does not flush outputs if exception is not
351 # dispatch._runcatch() does not flush outputs if exception is not
352 # handled by dispatch._dispatch()
352 # handled by dispatch._dispatch()
353 self.ui.flush()
353 self.ui.flush()
354 self._restoreio()
354 self._restoreio()
355
355
356 def attachio(self):
356 def attachio(self):
357 """Attach to client's stdio passed via unix domain socket; all
357 """Attach to client's stdio passed via unix domain socket; all
358 channels except cresult will no longer be used
358 channels except cresult will no longer be used
359 """
359 """
360 # tell client to sendmsg() with 1-byte payload, which makes it
360 # tell client to sendmsg() with 1-byte payload, which makes it
361 # distinctive from "attachio\n" command consumed by client.read()
361 # distinctive from "attachio\n" command consumed by client.read()
362 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
362 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
363 clientfds = osutil.recvfds(self.clientsock.fileno())
363 clientfds = osutil.recvfds(self.clientsock.fileno())
364 _log('received fds: %r\n' % clientfds)
364 _log('received fds: %r\n' % clientfds)
365
365
366 ui = self.ui
366 ui = self.ui
367 ui.flush()
367 ui.flush()
368 first = self._saveio()
368 first = self._saveio()
369 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
369 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
370 assert fd > 0
370 assert fd > 0
371 fp = getattr(ui, fn)
371 fp = getattr(ui, fn)
372 os.dup2(fd, fp.fileno())
372 os.dup2(fd, fp.fileno())
373 os.close(fd)
373 os.close(fd)
374 if not first:
374 if not first:
375 continue
375 continue
376 # reset buffering mode when client is first attached. as we want
376 # reset buffering mode when client is first attached. as we want
377 # to see output immediately on pager, the mode stays unchanged
377 # to see output immediately on pager, the mode stays unchanged
378 # when client re-attached. ferr is unchanged because it should
378 # when client re-attached. ferr is unchanged because it should
379 # be unbuffered no matter if it is a tty or not.
379 # be unbuffered no matter if it is a tty or not.
380 if fn == 'ferr':
380 if fn == 'ferr':
381 newfp = fp
381 newfp = fp
382 else:
382 else:
383 # make it line buffered explicitly because the default is
383 # make it line buffered explicitly because the default is
384 # decided on first write(), where fout could be a pager.
384 # decided on first write(), where fout could be a pager.
385 if fp.isatty():
385 if fp.isatty():
386 bufsize = 1 # line buffered
386 bufsize = 1 # line buffered
387 else:
387 else:
388 bufsize = -1 # system default
388 bufsize = -1 # system default
389 newfp = os.fdopen(fp.fileno(), mode, bufsize)
389 newfp = os.fdopen(fp.fileno(), mode, bufsize)
390 setattr(ui, fn, newfp)
390 setattr(ui, fn, newfp)
391 setattr(self, cn, newfp)
391 setattr(self, cn, newfp)
392
392
393 self.cresult.write(struct.pack('>i', len(clientfds)))
393 self.cresult.write(struct.pack('>i', len(clientfds)))
394
394
395 def _saveio(self):
395 def _saveio(self):
396 if self._oldios:
396 if self._oldios:
397 return False
397 return False
398 ui = self.ui
398 ui = self.ui
399 for cn, fn, _mode in _iochannels:
399 for cn, fn, _mode in _iochannels:
400 ch = getattr(self, cn)
400 ch = getattr(self, cn)
401 fp = getattr(ui, fn)
401 fp = getattr(ui, fn)
402 fd = os.dup(fp.fileno())
402 fd = os.dup(fp.fileno())
403 self._oldios.append((ch, fp, fd))
403 self._oldios.append((ch, fp, fd))
404 return True
404 return True
405
405
406 def _restoreio(self):
406 def _restoreio(self):
407 ui = self.ui
407 ui = self.ui
408 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
408 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
409 newfp = getattr(ui, fn)
409 newfp = getattr(ui, fn)
410 # close newfp while it's associated with client; otherwise it
410 # close newfp while it's associated with client; otherwise it
411 # would be closed when newfp is deleted
411 # would be closed when newfp is deleted
412 if newfp is not fp:
412 if newfp is not fp:
413 newfp.close()
413 newfp.close()
414 # restore original fd: fp is open again
414 # restore original fd: fp is open again
415 os.dup2(fd, fp.fileno())
415 os.dup2(fd, fp.fileno())
416 os.close(fd)
416 os.close(fd)
417 setattr(self, cn, ch)
417 setattr(self, cn, ch)
418 setattr(ui, fn, fp)
418 setattr(ui, fn, fp)
419 del self._oldios[:]
419 del self._oldios[:]
420
420
421 def validate(self):
421 def validate(self):
422 """Reload the config and check if the server is up to date
422 """Reload the config and check if the server is up to date
423
423
424 Read a list of '\0' separated arguments.
424 Read a list of '\0' separated arguments.
425 Write a non-empty list of '\0' separated instruction strings or '\0'
425 Write a non-empty list of '\0' separated instruction strings or '\0'
426 if the list is empty.
426 if the list is empty.
427 An instruction string could be either:
427 An instruction string could be either:
428 - "unlink $path", the client should unlink the path to stop the
428 - "unlink $path", the client should unlink the path to stop the
429 outdated server.
429 outdated server.
430 - "redirect $path", the client should attempt to connect to $path
430 - "redirect $path", the client should attempt to connect to $path
431 first. If it does not work, start a new server. It implies
431 first. If it does not work, start a new server. It implies
432 "reconnect".
432 "reconnect".
433 - "exit $n", the client should exit directly with code n.
433 - "exit $n", the client should exit directly with code n.
434 This may happen if we cannot parse the config.
434 This may happen if we cannot parse the config.
435 - "reconnect", the client should close the connection and
435 - "reconnect", the client should close the connection and
436 reconnect.
436 reconnect.
437 If neither "reconnect" nor "redirect" is included in the instruction
437 If neither "reconnect" nor "redirect" is included in the instruction
438 list, the client can continue with this server after completing all
438 list, the client can continue with this server after completing all
439 the instructions.
439 the instructions.
440 """
440 """
441 args = self._readlist()
441 args = self._readlist()
442 try:
442 try:
443 self.ui, lui = _loadnewui(self.ui, args)
443 self.ui, lui = _loadnewui(self.ui, args)
444 except error.ParseError as inst:
444 except error.ParseError as inst:
445 dispatch._formatparse(self.ui.warn, inst)
445 dispatch._formatparse(self.ui.warn, inst)
446 self.ui.flush()
446 self.ui.flush()
447 self.cresult.write('exit 255')
447 self.cresult.write('exit 255')
448 return
448 return
449 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
449 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
450 insts = []
450 insts = []
451 if newhash.mtimehash != self.hashstate.mtimehash:
451 if newhash.mtimehash != self.hashstate.mtimehash:
452 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
452 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
453 insts.append('unlink %s' % addr)
453 insts.append('unlink %s' % addr)
454 # mtimehash is empty if one or more extensions fail to load.
454 # mtimehash is empty if one or more extensions fail to load.
455 # to be compatible with hg, still serve the client this time.
455 # to be compatible with hg, still serve the client this time.
456 if self.hashstate.mtimehash:
456 if self.hashstate.mtimehash:
457 insts.append('reconnect')
457 insts.append('reconnect')
458 if newhash.confighash != self.hashstate.confighash:
458 if newhash.confighash != self.hashstate.confighash:
459 addr = _hashaddress(self.baseaddress, newhash.confighash)
459 addr = _hashaddress(self.baseaddress, newhash.confighash)
460 insts.append('redirect %s' % addr)
460 insts.append('redirect %s' % addr)
461 _log('validate: %s\n' % insts)
461 _log('validate: %s\n' % insts)
462 self.cresult.write('\0'.join(insts) or '\0')
462 self.cresult.write('\0'.join(insts) or '\0')
463
463
464 def chdir(self):
464 def chdir(self):
465 """Change current directory
465 """Change current directory
466
466
467 Note that the behavior of --cwd option is bit different from this.
467 Note that the behavior of --cwd option is bit different from this.
468 It does not affect --config parameter.
468 It does not affect --config parameter.
469 """
469 """
470 path = self._readstr()
470 path = self._readstr()
471 if not path:
471 if not path:
472 return
472 return
473 _log('chdir to %r\n' % path)
473 _log('chdir to %r\n' % path)
474 os.chdir(path)
474 os.chdir(path)
475
475
476 def setumask(self):
476 def setumask(self):
477 """Change umask"""
477 """Change umask"""
478 mask = struct.unpack('>I', self._read(4))[0]
478 mask = struct.unpack('>I', self._read(4))[0]
479 _log('setumask %r\n' % mask)
479 _log('setumask %r\n' % mask)
480 os.umask(mask)
480 os.umask(mask)
481
481
482 def getpager(self):
482 def getpager(self):
483 """Read cmdargs and write pager command to r-channel if enabled
483 """Read cmdargs and write pager command to r-channel if enabled
484
484
485 If pager isn't enabled, this writes '\0' because channeledoutput
485 If pager isn't enabled, this writes '\0' because channeledoutput
486 does not allow to write empty data.
486 does not allow to write empty data.
487 """
487 """
488 args = self._readlist()
488 args = self._readlist()
489 try:
489 try:
490 cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
490 cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
491 args)
491 args)
492 except (error.Abort, error.AmbiguousCommand, error.CommandError,
492 except (error.Abort, error.AmbiguousCommand, error.CommandError,
493 error.UnknownCommand):
493 error.UnknownCommand):
494 cmd = None
494 cmd = None
495 options = {}
495 options = {}
496 if not cmd or 'pager' not in options:
496 if not cmd or 'pager' not in options:
497 self.cresult.write('\0')
497 self.cresult.write('\0')
498 return
498 return
499
499
500 pagercmd = _setuppagercmd(self.ui, options, cmd)
500 pagercmd = _setuppagercmd(self.ui, options, cmd)
501 if pagercmd:
501 if pagercmd:
502 # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
502 # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
503 # we can exit if the pipe to the pager is closed
503 # we can exit if the pipe to the pager is closed
504 if util.safehasattr(signal, 'SIGPIPE') and \
504 if util.safehasattr(signal, 'SIGPIPE') and \
505 signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
505 signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
506 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
506 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
507 self.cresult.write(pagercmd)
507 self.cresult.write(pagercmd)
508 else:
508 else:
509 self.cresult.write('\0')
509 self.cresult.write('\0')
510
510
511 def setenv(self):
511 def setenv(self):
512 """Clear and update os.environ
512 """Clear and update os.environ
513
513
514 Note that not all variables can make an effect on the running process.
514 Note that not all variables can make an effect on the running process.
515 """
515 """
516 l = self._readlist()
516 l = self._readlist()
517 try:
517 try:
518 newenv = dict(s.split('=', 1) for s in l)
518 newenv = dict(s.split('=', 1) for s in l)
519 except ValueError:
519 except ValueError:
520 raise ValueError('unexpected value in setenv request')
520 raise ValueError('unexpected value in setenv request')
521 _log('setenv: %r\n' % sorted(newenv.keys()))
521 _log('setenv: %r\n' % sorted(newenv.keys()))
522 os.environ.clear()
522 os.environ.clear()
523 os.environ.update(newenv)
523 os.environ.update(newenv)
524
524
525 capabilities = commandserver.server.capabilities.copy()
525 capabilities = commandserver.server.capabilities.copy()
526 capabilities.update({'attachio': attachio,
526 capabilities.update({'attachio': attachio,
527 'chdir': chdir,
527 'chdir': chdir,
528 'getpager': getpager,
528 'getpager': getpager,
529 'setenv': setenv,
529 'setenv': setenv,
530 'setumask': setumask})
530 'setumask': setumask})
531
531
532 def _tempaddress(address):
532 def _tempaddress(address):
533 return '%s.%d.tmp' % (address, os.getpid())
533 return '%s.%d.tmp' % (address, os.getpid())
534
534
535 def _hashaddress(address, hashstr):
535 def _hashaddress(address, hashstr):
536 return '%s-%s' % (address, hashstr)
536 return '%s-%s' % (address, hashstr)
537
537
538 class chgunixservicehandler(object):
538 class chgunixservicehandler(object):
539 """Set of operations for chg services"""
539 """Set of operations for chg services"""
540
540
541 pollinterval = 1 # [sec]
541 pollinterval = 1 # [sec]
542
542
543 def __init__(self, ui):
543 def __init__(self, ui):
544 self.ui = ui
544 self.ui = ui
545 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
545 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
546 self._lastactive = time.time()
546 self._lastactive = time.time()
547
547
548 def bindsocket(self, sock, address):
548 def bindsocket(self, sock, address):
549 self._inithashstate(address)
549 self._inithashstate(address)
550 self._checkextensions()
550 self._checkextensions()
551 self._bind(sock)
551 self._bind(sock)
552 self._createsymlink()
552 self._createsymlink()
553
553
554 def _inithashstate(self, address):
554 def _inithashstate(self, address):
555 self._baseaddress = address
555 self._baseaddress = address
556 if self.ui.configbool('chgserver', 'skiphash', False):
556 if self.ui.configbool('chgserver', 'skiphash', False):
557 self._hashstate = None
557 self._hashstate = None
558 self._realaddress = address
558 self._realaddress = address
559 return
559 return
560 self._hashstate = hashstate.fromui(self.ui)
560 self._hashstate = hashstate.fromui(self.ui)
561 self._realaddress = _hashaddress(address, self._hashstate.confighash)
561 self._realaddress = _hashaddress(address, self._hashstate.confighash)
562
562
563 def _checkextensions(self):
563 def _checkextensions(self):
564 if not self._hashstate:
564 if not self._hashstate:
565 return
565 return
566 if extensions.notloaded():
566 if extensions.notloaded():
567 # one or more extensions failed to load. mtimehash becomes
567 # one or more extensions failed to load. mtimehash becomes
568 # meaningless because we do not know the paths of those extensions.
568 # meaningless because we do not know the paths of those extensions.
569 # set mtimehash to an illegal hash value to invalidate the server.
569 # set mtimehash to an illegal hash value to invalidate the server.
570 self._hashstate.mtimehash = ''
570 self._hashstate.mtimehash = ''
571
571
572 def _bind(self, sock):
572 def _bind(self, sock):
573 # use a unique temp address so we can stat the file and do ownership
573 # use a unique temp address so we can stat the file and do ownership
574 # check later
574 # check later
575 tempaddress = _tempaddress(self._realaddress)
575 tempaddress = _tempaddress(self._realaddress)
576 util.bindunixsocket(sock, tempaddress)
576 util.bindunixsocket(sock, tempaddress)
577 self._socketstat = os.stat(tempaddress)
577 self._socketstat = os.stat(tempaddress)
578 # rename will replace the old socket file if exists atomically. the
578 # rename will replace the old socket file if exists atomically. the
579 # old server will detect ownership change and exit.
579 # old server will detect ownership change and exit.
580 util.rename(tempaddress, self._realaddress)
580 util.rename(tempaddress, self._realaddress)
581
581
582 def _createsymlink(self):
582 def _createsymlink(self):
583 if self._baseaddress == self._realaddress:
583 if self._baseaddress == self._realaddress:
584 return
584 return
585 tempaddress = _tempaddress(self._baseaddress)
585 tempaddress = _tempaddress(self._baseaddress)
586 os.symlink(os.path.basename(self._realaddress), tempaddress)
586 os.symlink(os.path.basename(self._realaddress), tempaddress)
587 util.rename(tempaddress, self._baseaddress)
587 util.rename(tempaddress, self._baseaddress)
588
588
589 def _issocketowner(self):
589 def _issocketowner(self):
590 try:
590 try:
591 stat = os.stat(self._realaddress)
591 stat = os.stat(self._realaddress)
592 return (stat.st_ino == self._socketstat.st_ino and
592 return (stat.st_ino == self._socketstat.st_ino and
593 stat.st_mtime == self._socketstat.st_mtime)
593 stat.st_mtime == self._socketstat.st_mtime)
594 except OSError:
594 except OSError:
595 return False
595 return False
596
596
597 def unlinksocket(self, address):
597 def unlinksocket(self, address):
598 if not self._issocketowner():
598 if not self._issocketowner():
599 return
599 return
600 # it is possible to have a race condition here that we may
600 # it is possible to have a race condition here that we may
601 # remove another server's socket file. but that's okay
601 # remove another server's socket file. but that's okay
602 # since that server will detect and exit automatically and
602 # since that server will detect and exit automatically and
603 # the client will start a new server on demand.
603 # the client will start a new server on demand.
604 try:
604 try:
605 os.unlink(self._realaddress)
605 os.unlink(self._realaddress)
606 except OSError as exc:
606 except OSError as exc:
607 if exc.errno != errno.ENOENT:
607 if exc.errno != errno.ENOENT:
608 raise
608 raise
609
609
610 def printbanner(self, address):
610 def printbanner(self, address):
611 # no "listening at" message should be printed to simulate hg behavior
611 # no "listening at" message should be printed to simulate hg behavior
612 pass
612 pass
613
613
614 def shouldexit(self):
614 def shouldexit(self):
615 if not self._issocketowner():
615 if not self._issocketowner():
616 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
616 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
617 return True
617 return True
618 if time.time() - self._lastactive > self._idletimeout:
618 if time.time() - self._lastactive > self._idletimeout:
619 self.ui.debug('being idle too long. exiting.\n')
619 self.ui.debug('being idle too long. exiting.\n')
620 return True
620 return True
621 return False
621 return False
622
622
623 def newconnection(self):
623 def newconnection(self):
624 self._lastactive = time.time()
624 self._lastactive = time.time()
625
625
626 def createcmdserver(self, repo, conn, fin, fout):
626 def createcmdserver(self, repo, conn, fin, fout):
627 return chgcmdserver(self.ui, repo, fin, fout, conn,
627 return chgcmdserver(self.ui, repo, fin, fout, conn,
628 self._hashstate, self._baseaddress)
628 self._hashstate, self._baseaddress)
629
629
630 def chgunixservice(ui, repo, opts):
630 def chgunixservice(ui, repo, opts):
631 if repo:
631 if repo:
632 # one chgserver can serve multiple repos. drop repo infomation
632 # one chgserver can serve multiple repos. drop repo infomation
633 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
633 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
634 h = chgunixservicehandler(ui)
634 h = chgunixservicehandler(ui)
635 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
635 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
636
636
637 def uisetup(ui):
637 def uisetup(ui):
638 commandserver._servicemap['chgunix'] = chgunixservice
638 commandserver._servicemap['chgunix'] = chgunixservice
639
639
640 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
640 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
641 # start another chg. drop it to avoid possible side effects.
641 # start another chg. drop it to avoid possible side effects.
642 if 'CHGINTERNALMARK' in os.environ:
642 if 'CHGINTERNALMARK' in os.environ:
643 del os.environ['CHGINTERNALMARK']
643 del os.environ['CHGINTERNALMARK']
@@ -1,69 +1,69 b''
1 # Mercurial extension to provide the 'hg children' command
1 # Mercurial extension to provide the 'hg children' command
2 #
2 #
3 # Copyright 2007 by Intevation GmbH <intevation@intevation.de>
3 # Copyright 2007 by Intevation GmbH <intevation@intevation.de>
4 #
4 #
5 # Author(s):
5 # Author(s):
6 # Thomas Arendsen Hein <thomas@intevation.de>
6 # Thomas Arendsen Hein <thomas@intevation.de>
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10
10
11 '''command to display child changesets (DEPRECATED)
11 '''command to display child changesets (DEPRECATED)
12
12
13 This extension is deprecated. You should use :hg:`log -r
13 This extension is deprecated. You should use :hg:`log -r
14 "children(REV)"` instead.
14 "children(REV)"` instead.
15 '''
15 '''
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20 from mercurial import (
20 from mercurial import (
21 cmdutil,
21 cmdutil,
22 commands,
22 commands,
23 )
23 )
24
24
25 templateopts = commands.templateopts
25 templateopts = commands.templateopts
26
26
27 cmdtable = {}
27 cmdtable = {}
28 command = cmdutil.command(cmdtable)
28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 # be specifying the version(s) of Mercurial they are tested with, or
31 # be specifying the version(s) of Mercurial they are tested with, or
32 # leave the attribute unspecified.
32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34
34
35 @command('children',
35 @command('children',
36 [('r', 'rev', '',
36 [('r', 'rev', '',
37 _('show children of the specified revision'), _('REV')),
37 _('show children of the specified revision'), _('REV')),
38 ] + templateopts,
38 ] + templateopts,
39 _('hg children [-r REV] [FILE]'),
39 _('hg children [-r REV] [FILE]'),
40 inferrepo=True)
40 inferrepo=True)
41 def children(ui, repo, file_=None, **opts):
41 def children(ui, repo, file_=None, **opts):
42 """show the children of the given or working directory revision
42 """show the children of the given or working directory revision
43
43
44 Print the children of the working directory's revisions. If a
44 Print the children of the working directory's revisions. If a
45 revision is given via -r/--rev, the children of that revision will
45 revision is given via -r/--rev, the children of that revision will
46 be printed. If a file argument is given, revision in which the
46 be printed. If a file argument is given, revision in which the
47 file was last changed (after the working directory revision or the
47 file was last changed (after the working directory revision or the
48 argument to --rev if given) is printed.
48 argument to --rev if given) is printed.
49
49
50 Please use :hg:`log` instead::
50 Please use :hg:`log` instead::
51
51
52 hg children => hg log -r "children()"
52 hg children => hg log -r "children()"
53 hg children -r REV => hg log -r "children(REV)"
53 hg children -r REV => hg log -r "children(REV)"
54
54
55 See :hg:`help log` and :hg:`help revsets.children`.
55 See :hg:`help log` and :hg:`help revsets.children`.
56
56
57 """
57 """
58 rev = opts.get('rev')
58 rev = opts.get('rev')
59 if file_:
59 if file_:
60 fctx = repo.filectx(file_, changeid=rev)
60 fctx = repo.filectx(file_, changeid=rev)
61 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
61 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
62 else:
62 else:
63 ctx = repo[rev]
63 ctx = repo[rev]
64 childctxs = ctx.children()
64 childctxs = ctx.children()
65
65
66 displayer = cmdutil.show_changeset(ui, repo, opts)
66 displayer = cmdutil.show_changeset(ui, repo, opts)
67 for cctx in childctxs:
67 for cctx in childctxs:
68 displayer.show(cctx)
68 displayer.show(cctx)
69 displayer.close()
69 displayer.close()
@@ -1,211 +1,211 b''
1 # churn.py - create a graph of revisions count grouped by template
1 # churn.py - create a graph of revisions count grouped by template
2 #
2 #
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''command to display statistics about repository history'''
9 '''command to display statistics about repository history'''
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 import datetime
13 import datetime
14 import os
14 import os
15 import time
15 import time
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import (
18 from mercurial import (
19 cmdutil,
19 cmdutil,
20 commands,
20 commands,
21 encoding,
21 encoding,
22 patch,
22 patch,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 )
25 )
26
26
27 cmdtable = {}
27 cmdtable = {}
28 command = cmdutil.command(cmdtable)
28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 # be specifying the version(s) of Mercurial they are tested with, or
31 # be specifying the version(s) of Mercurial they are tested with, or
32 # leave the attribute unspecified.
32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34
34
35 def maketemplater(ui, repo, tmpl):
35 def maketemplater(ui, repo, tmpl):
36 return cmdutil.changeset_templater(ui, repo, False, None, tmpl, None, False)
36 return cmdutil.changeset_templater(ui, repo, False, None, tmpl, None, False)
37
37
38 def changedlines(ui, repo, ctx1, ctx2, fns):
38 def changedlines(ui, repo, ctx1, ctx2, fns):
39 added, removed = 0, 0
39 added, removed = 0, 0
40 fmatch = scmutil.matchfiles(repo, fns)
40 fmatch = scmutil.matchfiles(repo, fns)
41 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
41 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
42 for l in diff.split('\n'):
42 for l in diff.split('\n'):
43 if l.startswith("+") and not l.startswith("+++ "):
43 if l.startswith("+") and not l.startswith("+++ "):
44 added += 1
44 added += 1
45 elif l.startswith("-") and not l.startswith("--- "):
45 elif l.startswith("-") and not l.startswith("--- "):
46 removed += 1
46 removed += 1
47 return (added, removed)
47 return (added, removed)
48
48
49 def countrate(ui, repo, amap, *pats, **opts):
49 def countrate(ui, repo, amap, *pats, **opts):
50 """Calculate stats"""
50 """Calculate stats"""
51 if opts.get('dateformat'):
51 if opts.get('dateformat'):
52 def getkey(ctx):
52 def getkey(ctx):
53 t, tz = ctx.date()
53 t, tz = ctx.date()
54 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
54 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
55 return date.strftime(opts['dateformat'])
55 return date.strftime(opts['dateformat'])
56 else:
56 else:
57 tmpl = opts.get('oldtemplate') or opts.get('template')
57 tmpl = opts.get('oldtemplate') or opts.get('template')
58 tmpl = maketemplater(ui, repo, tmpl)
58 tmpl = maketemplater(ui, repo, tmpl)
59 def getkey(ctx):
59 def getkey(ctx):
60 ui.pushbuffer()
60 ui.pushbuffer()
61 tmpl.show(ctx)
61 tmpl.show(ctx)
62 return ui.popbuffer()
62 return ui.popbuffer()
63
63
64 state = {'count': 0}
64 state = {'count': 0}
65 rate = {}
65 rate = {}
66 df = False
66 df = False
67 if opts.get('date'):
67 if opts.get('date'):
68 df = util.matchdate(opts['date'])
68 df = util.matchdate(opts['date'])
69
69
70 m = scmutil.match(repo[None], pats, opts)
70 m = scmutil.match(repo[None], pats, opts)
71 def prep(ctx, fns):
71 def prep(ctx, fns):
72 rev = ctx.rev()
72 rev = ctx.rev()
73 if df and not df(ctx.date()[0]): # doesn't match date format
73 if df and not df(ctx.date()[0]): # doesn't match date format
74 return
74 return
75
75
76 key = getkey(ctx).strip()
76 key = getkey(ctx).strip()
77 key = amap.get(key, key) # alias remap
77 key = amap.get(key, key) # alias remap
78 if opts.get('changesets'):
78 if opts.get('changesets'):
79 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
79 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
80 else:
80 else:
81 parents = ctx.parents()
81 parents = ctx.parents()
82 if len(parents) > 1:
82 if len(parents) > 1:
83 ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
83 ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
84 return
84 return
85
85
86 ctx1 = parents[0]
86 ctx1 = parents[0]
87 lines = changedlines(ui, repo, ctx1, ctx, fns)
87 lines = changedlines(ui, repo, ctx1, ctx, fns)
88 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
88 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
89
89
90 state['count'] += 1
90 state['count'] += 1
91 ui.progress(_('analyzing'), state['count'], total=len(repo),
91 ui.progress(_('analyzing'), state['count'], total=len(repo),
92 unit=_('revisions'))
92 unit=_('revisions'))
93
93
94 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
94 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
95 continue
95 continue
96
96
97 ui.progress(_('analyzing'), None)
97 ui.progress(_('analyzing'), None)
98
98
99 return rate
99 return rate
100
100
101
101
102 @command('churn',
102 @command('churn',
103 [('r', 'rev', [],
103 [('r', 'rev', [],
104 _('count rate for the specified revision or revset'), _('REV')),
104 _('count rate for the specified revision or revset'), _('REV')),
105 ('d', 'date', '',
105 ('d', 'date', '',
106 _('count rate for revisions matching date spec'), _('DATE')),
106 _('count rate for revisions matching date spec'), _('DATE')),
107 ('t', 'oldtemplate', '',
107 ('t', 'oldtemplate', '',
108 _('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
108 _('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
109 ('T', 'template', '{author|email}',
109 ('T', 'template', '{author|email}',
110 _('template to group changesets'), _('TEMPLATE')),
110 _('template to group changesets'), _('TEMPLATE')),
111 ('f', 'dateformat', '',
111 ('f', 'dateformat', '',
112 _('strftime-compatible format for grouping by date'), _('FORMAT')),
112 _('strftime-compatible format for grouping by date'), _('FORMAT')),
113 ('c', 'changesets', False, _('count rate by number of changesets')),
113 ('c', 'changesets', False, _('count rate by number of changesets')),
114 ('s', 'sort', False, _('sort by key (default: sort by count)')),
114 ('s', 'sort', False, _('sort by key (default: sort by count)')),
115 ('', 'diffstat', False, _('display added/removed lines separately')),
115 ('', 'diffstat', False, _('display added/removed lines separately')),
116 ('', 'aliases', '', _('file with email aliases'), _('FILE')),
116 ('', 'aliases', '', _('file with email aliases'), _('FILE')),
117 ] + commands.walkopts,
117 ] + commands.walkopts,
118 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
118 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
119 inferrepo=True)
119 inferrepo=True)
120 def churn(ui, repo, *pats, **opts):
120 def churn(ui, repo, *pats, **opts):
121 '''histogram of changes to the repository
121 '''histogram of changes to the repository
122
122
123 This command will display a histogram representing the number
123 This command will display a histogram representing the number
124 of changed lines or revisions, grouped according to the given
124 of changed lines or revisions, grouped according to the given
125 template. The default template will group changes by author.
125 template. The default template will group changes by author.
126 The --dateformat option may be used to group the results by
126 The --dateformat option may be used to group the results by
127 date instead.
127 date instead.
128
128
129 Statistics are based on the number of changed lines, or
129 Statistics are based on the number of changed lines, or
130 alternatively the number of matching revisions if the
130 alternatively the number of matching revisions if the
131 --changesets option is specified.
131 --changesets option is specified.
132
132
133 Examples::
133 Examples::
134
134
135 # display count of changed lines for every committer
135 # display count of changed lines for every committer
136 hg churn -t "{author|email}"
136 hg churn -t "{author|email}"
137
137
138 # display daily activity graph
138 # display daily activity graph
139 hg churn -f "%H" -s -c
139 hg churn -f "%H" -s -c
140
140
141 # display activity of developers by month
141 # display activity of developers by month
142 hg churn -f "%Y-%m" -s -c
142 hg churn -f "%Y-%m" -s -c
143
143
144 # display count of lines changed in every year
144 # display count of lines changed in every year
145 hg churn -f "%Y" -s
145 hg churn -f "%Y" -s
146
146
147 It is possible to map alternate email addresses to a main address
147 It is possible to map alternate email addresses to a main address
148 by providing a file using the following format::
148 by providing a file using the following format::
149
149
150 <alias email> = <actual email>
150 <alias email> = <actual email>
151
151
152 Such a file may be specified with the --aliases option, otherwise
152 Such a file may be specified with the --aliases option, otherwise
153 a .hgchurn file will be looked for in the working directory root.
153 a .hgchurn file will be looked for in the working directory root.
154 Aliases will be split from the rightmost "=".
154 Aliases will be split from the rightmost "=".
155 '''
155 '''
156 def pad(s, l):
156 def pad(s, l):
157 return s + " " * (l - encoding.colwidth(s))
157 return s + " " * (l - encoding.colwidth(s))
158
158
159 amap = {}
159 amap = {}
160 aliases = opts.get('aliases')
160 aliases = opts.get('aliases')
161 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
161 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
162 aliases = repo.wjoin('.hgchurn')
162 aliases = repo.wjoin('.hgchurn')
163 if aliases:
163 if aliases:
164 for l in open(aliases, "r"):
164 for l in open(aliases, "r"):
165 try:
165 try:
166 alias, actual = l.rsplit('=' in l and '=' or None, 1)
166 alias, actual = l.rsplit('=' in l and '=' or None, 1)
167 amap[alias.strip()] = actual.strip()
167 amap[alias.strip()] = actual.strip()
168 except ValueError:
168 except ValueError:
169 l = l.strip()
169 l = l.strip()
170 if l:
170 if l:
171 ui.warn(_("skipping malformed alias: %s\n") % l)
171 ui.warn(_("skipping malformed alias: %s\n") % l)
172 continue
172 continue
173
173
174 rate = countrate(ui, repo, amap, *pats, **opts).items()
174 rate = countrate(ui, repo, amap, *pats, **opts).items()
175 if not rate:
175 if not rate:
176 return
176 return
177
177
178 if opts.get('sort'):
178 if opts.get('sort'):
179 rate.sort()
179 rate.sort()
180 else:
180 else:
181 rate.sort(key=lambda x: (-sum(x[1]), x))
181 rate.sort(key=lambda x: (-sum(x[1]), x))
182
182
183 # Be careful not to have a zero maxcount (issue833)
183 # Be careful not to have a zero maxcount (issue833)
184 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
184 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
185 maxname = max(len(k) for k, v in rate)
185 maxname = max(len(k) for k, v in rate)
186
186
187 ttywidth = ui.termwidth()
187 ttywidth = ui.termwidth()
188 ui.debug("assuming %i character terminal\n" % ttywidth)
188 ui.debug("assuming %i character terminal\n" % ttywidth)
189 width = ttywidth - maxname - 2 - 2 - 2
189 width = ttywidth - maxname - 2 - 2 - 2
190
190
191 if opts.get('diffstat'):
191 if opts.get('diffstat'):
192 width -= 15
192 width -= 15
193 def format(name, diffstat):
193 def format(name, diffstat):
194 added, removed = diffstat
194 added, removed = diffstat
195 return "%s %15s %s%s\n" % (pad(name, maxname),
195 return "%s %15s %s%s\n" % (pad(name, maxname),
196 '+%d/-%d' % (added, removed),
196 '+%d/-%d' % (added, removed),
197 ui.label('+' * charnum(added),
197 ui.label('+' * charnum(added),
198 'diffstat.inserted'),
198 'diffstat.inserted'),
199 ui.label('-' * charnum(removed),
199 ui.label('-' * charnum(removed),
200 'diffstat.deleted'))
200 'diffstat.deleted'))
201 else:
201 else:
202 width -= 6
202 width -= 6
203 def format(name, count):
203 def format(name, count):
204 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
204 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
205 '*' * charnum(sum(count)))
205 '*' * charnum(sum(count)))
206
206
207 def charnum(count):
207 def charnum(count):
208 return int(round(count * width / maxcount))
208 return int(round(count * width / maxcount))
209
209
210 for name, count in rate:
210 for name, count in rate:
211 ui.write(format(name, count))
211 ui.write(format(name, count))
@@ -1,186 +1,186 b''
1 # This software may be used and distributed according to the terms of the
1 # This software may be used and distributed according to the terms of the
2 # GNU General Public License version 2 or any later version.
2 # GNU General Public License version 2 or any later version.
3
3
4 """advertise pre-generated bundles to seed clones
4 """advertise pre-generated bundles to seed clones
5
5
6 "clonebundles" is a server-side extension used to advertise the existence
6 "clonebundles" is a server-side extension used to advertise the existence
7 of pre-generated, externally hosted bundle files to clients that are
7 of pre-generated, externally hosted bundle files to clients that are
8 cloning so that cloning can be faster, more reliable, and require less
8 cloning so that cloning can be faster, more reliable, and require less
9 resources on the server.
9 resources on the server.
10
10
11 Cloning can be a CPU and I/O intensive operation on servers. Traditionally,
11 Cloning can be a CPU and I/O intensive operation on servers. Traditionally,
12 the server, in response to a client's request to clone, dynamically generates
12 the server, in response to a client's request to clone, dynamically generates
13 a bundle containing the entire repository content and sends it to the client.
13 a bundle containing the entire repository content and sends it to the client.
14 There is no caching on the server and the server will have to redundantly
14 There is no caching on the server and the server will have to redundantly
15 generate the same outgoing bundle in response to each clone request. For
15 generate the same outgoing bundle in response to each clone request. For
16 servers with large repositories or with high clone volume, the load from
16 servers with large repositories or with high clone volume, the load from
17 clones can make scaling the server challenging and costly.
17 clones can make scaling the server challenging and costly.
18
18
19 This extension provides server operators the ability to offload potentially
19 This extension provides server operators the ability to offload potentially
20 expensive clone load to an external service. Here's how it works.
20 expensive clone load to an external service. Here's how it works.
21
21
22 1. A server operator establishes a mechanism for making bundle files available
22 1. A server operator establishes a mechanism for making bundle files available
23 on a hosting service where Mercurial clients can fetch them.
23 on a hosting service where Mercurial clients can fetch them.
24 2. A manifest file listing available bundle URLs and some optional metadata
24 2. A manifest file listing available bundle URLs and some optional metadata
25 is added to the Mercurial repository on the server.
25 is added to the Mercurial repository on the server.
26 3. A client initiates a clone against a clone bundles aware server.
26 3. A client initiates a clone against a clone bundles aware server.
27 4. The client sees the server is advertising clone bundles and fetches the
27 4. The client sees the server is advertising clone bundles and fetches the
28 manifest listing available bundles.
28 manifest listing available bundles.
29 5. The client filters and sorts the available bundles based on what it
29 5. The client filters and sorts the available bundles based on what it
30 supports and prefers.
30 supports and prefers.
31 6. The client downloads and applies an available bundle from the
31 6. The client downloads and applies an available bundle from the
32 server-specified URL.
32 server-specified URL.
33 7. The client reconnects to the original server and performs the equivalent
33 7. The client reconnects to the original server and performs the equivalent
34 of :hg:`pull` to retrieve all repository data not in the bundle. (The
34 of :hg:`pull` to retrieve all repository data not in the bundle. (The
35 repository could have been updated between when the bundle was created
35 repository could have been updated between when the bundle was created
36 and when the client started the clone.)
36 and when the client started the clone.)
37
37
38 Instead of the server generating full repository bundles for every clone
38 Instead of the server generating full repository bundles for every clone
39 request, it generates full bundles once and they are subsequently reused to
39 request, it generates full bundles once and they are subsequently reused to
40 bootstrap new clones. The server may still transfer data at clone time.
40 bootstrap new clones. The server may still transfer data at clone time.
41 However, this is only data that has been added/changed since the bundle was
41 However, this is only data that has been added/changed since the bundle was
42 created. For large, established repositories, this can reduce server load for
42 created. For large, established repositories, this can reduce server load for
43 clones to less than 1% of original.
43 clones to less than 1% of original.
44
44
45 To work, this extension requires the following of server operators:
45 To work, this extension requires the following of server operators:
46
46
47 * Generating bundle files of repository content (typically periodically,
47 * Generating bundle files of repository content (typically periodically,
48 such as once per day).
48 such as once per day).
49 * A file server that clients have network access to and that Python knows
49 * A file server that clients have network access to and that Python knows
50 how to talk to through its normal URL handling facility (typically an
50 how to talk to through its normal URL handling facility (typically an
51 HTTP server).
51 HTTP server).
52 * A process for keeping the bundles manifest in sync with available bundle
52 * A process for keeping the bundles manifest in sync with available bundle
53 files.
53 files.
54
54
55 Strictly speaking, using a static file hosting server isn't required: a server
55 Strictly speaking, using a static file hosting server isn't required: a server
56 operator could use a dynamic service for retrieving bundle data. However,
56 operator could use a dynamic service for retrieving bundle data. However,
57 static file hosting services are simple and scalable and should be sufficient
57 static file hosting services are simple and scalable and should be sufficient
58 for most needs.
58 for most needs.
59
59
60 Bundle files can be generated with the :hg:`bundle` command. Typically
60 Bundle files can be generated with the :hg:`bundle` command. Typically
61 :hg:`bundle --all` is used to produce a bundle of the entire repository.
61 :hg:`bundle --all` is used to produce a bundle of the entire repository.
62
62
63 :hg:`debugcreatestreamclonebundle` can be used to produce a special
63 :hg:`debugcreatestreamclonebundle` can be used to produce a special
64 *streaming clone bundle*. These are bundle files that are extremely efficient
64 *streaming clone bundle*. These are bundle files that are extremely efficient
65 to produce and consume (read: fast). However, they are larger than
65 to produce and consume (read: fast). However, they are larger than
66 traditional bundle formats and require that clients support the exact set
66 traditional bundle formats and require that clients support the exact set
67 of repository data store formats in use by the repository that created them.
67 of repository data store formats in use by the repository that created them.
68 Typically, a newer server can serve data that is compatible with older clients.
68 Typically, a newer server can serve data that is compatible with older clients.
69 However, *streaming clone bundles* don't have this guarantee. **Server
69 However, *streaming clone bundles* don't have this guarantee. **Server
70 operators need to be aware that newer versions of Mercurial may produce
70 operators need to be aware that newer versions of Mercurial may produce
71 streaming clone bundles incompatible with older Mercurial versions.**
71 streaming clone bundles incompatible with older Mercurial versions.**
72
72
73 A server operator is responsible for creating a ``.hg/clonebundles.manifest``
73 A server operator is responsible for creating a ``.hg/clonebundles.manifest``
74 file containing the list of available bundle files suitable for seeding
74 file containing the list of available bundle files suitable for seeding
75 clones. If this file does not exist, the repository will not advertise the
75 clones. If this file does not exist, the repository will not advertise the
76 existence of clone bundles when clients connect.
76 existence of clone bundles when clients connect.
77
77
78 The manifest file contains a newline (\n) delimited list of entries.
78 The manifest file contains a newline (\n) delimited list of entries.
79
79
80 Each line in this file defines an available bundle. Lines have the format:
80 Each line in this file defines an available bundle. Lines have the format:
81
81
82 <URL> [<key>=<value>[ <key>=<value>]]
82 <URL> [<key>=<value>[ <key>=<value>]]
83
83
84 That is, a URL followed by an optional, space-delimited list of key=value
84 That is, a URL followed by an optional, space-delimited list of key=value
85 pairs describing additional properties of this bundle. Both keys and values
85 pairs describing additional properties of this bundle. Both keys and values
86 are URI encoded.
86 are URI encoded.
87
87
88 Keys in UPPERCASE are reserved for use by Mercurial and are defined below.
88 Keys in UPPERCASE are reserved for use by Mercurial and are defined below.
89 All non-uppercase keys can be used by site installations. An example use
89 All non-uppercase keys can be used by site installations. An example use
90 for custom properties is to use the *datacenter* attribute to define which
90 for custom properties is to use the *datacenter* attribute to define which
91 data center a file is hosted in. Clients could then prefer a server in the
91 data center a file is hosted in. Clients could then prefer a server in the
92 data center closest to them.
92 data center closest to them.
93
93
94 The following reserved keys are currently defined:
94 The following reserved keys are currently defined:
95
95
96 BUNDLESPEC
96 BUNDLESPEC
97 A "bundle specification" string that describes the type of the bundle.
97 A "bundle specification" string that describes the type of the bundle.
98
98
99 These are string values that are accepted by the "--type" argument of
99 These are string values that are accepted by the "--type" argument of
100 :hg:`bundle`.
100 :hg:`bundle`.
101
101
102 The values are parsed in strict mode, which means they must be of the
102 The values are parsed in strict mode, which means they must be of the
103 "<compression>-<type>" form. See
103 "<compression>-<type>" form. See
104 mercurial.exchange.parsebundlespec() for more details.
104 mercurial.exchange.parsebundlespec() for more details.
105
105
106 :hg:`debugbundle --spec` can be used to print the bundle specification
106 :hg:`debugbundle --spec` can be used to print the bundle specification
107 string for a bundle file. The output of this command can be used verbatim
107 string for a bundle file. The output of this command can be used verbatim
108 for the value of ``BUNDLESPEC`` (it is already escaped).
108 for the value of ``BUNDLESPEC`` (it is already escaped).
109
109
110 Clients will automatically filter out specifications that are unknown or
110 Clients will automatically filter out specifications that are unknown or
111 unsupported so they won't attempt to download something that likely won't
111 unsupported so they won't attempt to download something that likely won't
112 apply.
112 apply.
113
113
114 The actual value doesn't impact client behavior beyond filtering:
114 The actual value doesn't impact client behavior beyond filtering:
115 clients will still sniff the bundle type from the header of downloaded
115 clients will still sniff the bundle type from the header of downloaded
116 files.
116 files.
117
117
118 **Use of this key is highly recommended**, as it allows clients to
118 **Use of this key is highly recommended**, as it allows clients to
119 easily skip unsupported bundles. If this key is not defined, an old
119 easily skip unsupported bundles. If this key is not defined, an old
120 client may attempt to apply a bundle that it is incapable of reading.
120 client may attempt to apply a bundle that it is incapable of reading.
121
121
122 REQUIRESNI
122 REQUIRESNI
123 Whether Server Name Indication (SNI) is required to connect to the URL.
123 Whether Server Name Indication (SNI) is required to connect to the URL.
124 SNI allows servers to use multiple certificates on the same IP. It is
124 SNI allows servers to use multiple certificates on the same IP. It is
125 somewhat common in CDNs and other hosting providers. Older Python
125 somewhat common in CDNs and other hosting providers. Older Python
126 versions do not support SNI. Defining this attribute enables clients
126 versions do not support SNI. Defining this attribute enables clients
127 with older Python versions to filter this entry without experiencing
127 with older Python versions to filter this entry without experiencing
128 an opaque SSL failure at connection time.
128 an opaque SSL failure at connection time.
129
129
130 If this is defined, it is important to advertise a non-SNI fallback
130 If this is defined, it is important to advertise a non-SNI fallback
131 URL or clients running old Python releases may not be able to clone
131 URL or clients running old Python releases may not be able to clone
132 with the clonebundles facility.
132 with the clonebundles facility.
133
133
134 Value should be "true".
134 Value should be "true".
135
135
136 Manifests can contain multiple entries. Assuming metadata is defined, clients
136 Manifests can contain multiple entries. Assuming metadata is defined, clients
137 will filter entries from the manifest that they don't support. The remaining
137 will filter entries from the manifest that they don't support. The remaining
138 entries are optionally sorted by client preferences
138 entries are optionally sorted by client preferences
139 (``experimental.clonebundleprefers`` config option). The client then attempts
139 (``experimental.clonebundleprefers`` config option). The client then attempts
140 to fetch the bundle at the first URL in the remaining list.
140 to fetch the bundle at the first URL in the remaining list.
141
141
142 **Errors when downloading a bundle will fail the entire clone operation:
142 **Errors when downloading a bundle will fail the entire clone operation:
143 clients do not automatically fall back to a traditional clone.** The reason
143 clients do not automatically fall back to a traditional clone.** The reason
144 for this is that if a server is using clone bundles, it is probably doing so
144 for this is that if a server is using clone bundles, it is probably doing so
145 because the feature is necessary to help it scale. In other words, there
145 because the feature is necessary to help it scale. In other words, there
146 is an assumption that clone load will be offloaded to another service and
146 is an assumption that clone load will be offloaded to another service and
147 that the Mercurial server isn't responsible for serving this clone load.
147 that the Mercurial server isn't responsible for serving this clone load.
148 If that other service experiences issues and clients start mass falling back to
148 If that other service experiences issues and clients start mass falling back to
149 the original Mercurial server, the added clone load could overwhelm the server
149 the original Mercurial server, the added clone load could overwhelm the server
150 due to unexpected load and effectively take it offline. Not having clients
150 due to unexpected load and effectively take it offline. Not having clients
151 automatically fall back to cloning from the original server mitigates this
151 automatically fall back to cloning from the original server mitigates this
152 scenario.
152 scenario.
153
153
154 Because there is no automatic Mercurial server fallback on failure of the
154 Because there is no automatic Mercurial server fallback on failure of the
155 bundle hosting service, it is important for server operators to view the bundle
155 bundle hosting service, it is important for server operators to view the bundle
156 hosting service as an extension of the Mercurial server in terms of
156 hosting service as an extension of the Mercurial server in terms of
157 availability and service level agreements: if the bundle hosting service goes
157 availability and service level agreements: if the bundle hosting service goes
158 down, so does the ability for clients to clone. Note: clients will see a
158 down, so does the ability for clients to clone. Note: clients will see a
159 message informing them how to bypass the clone bundles facility when a failure
159 message informing them how to bypass the clone bundles facility when a failure
160 occurs. So server operators should prepare for some people to follow these
160 occurs. So server operators should prepare for some people to follow these
161 instructions when a failure occurs, thus driving more load to the original
161 instructions when a failure occurs, thus driving more load to the original
162 Mercurial server when the bundle hosting service fails.
162 Mercurial server when the bundle hosting service fails.
163 """
163 """
164
164
165 from __future__ import absolute_import
165 from __future__ import absolute_import
166
166
167 from mercurial import (
167 from mercurial import (
168 extensions,
168 extensions,
169 wireproto,
169 wireproto,
170 )
170 )
171
171
172 testedwith = 'internal'
172 testedwith = 'ships-with-hg-core'
173
173
174 def capabilities(orig, repo, proto):
174 def capabilities(orig, repo, proto):
175 caps = orig(repo, proto)
175 caps = orig(repo, proto)
176
176
177 # Only advertise if a manifest exists. This does add some I/O to requests.
177 # Only advertise if a manifest exists. This does add some I/O to requests.
178 # But this should be cheaper than a wasted network round trip due to
178 # But this should be cheaper than a wasted network round trip due to
179 # missing file.
179 # missing file.
180 if repo.opener.exists('clonebundles.manifest'):
180 if repo.opener.exists('clonebundles.manifest'):
181 caps.append('clonebundles')
181 caps.append('clonebundles')
182
182
183 return caps
183 return caps
184
184
185 def extsetup(ui):
185 def extsetup(ui):
186 extensions.wrapfunction(wireproto, '_capabilities', capabilities)
186 extensions.wrapfunction(wireproto, '_capabilities', capabilities)
@@ -1,665 +1,665 b''
1 # color.py color output for Mercurial commands
1 # color.py color output for Mercurial commands
2 #
2 #
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''colorize output from some commands
8 '''colorize output from some commands
9
9
10 The color extension colorizes output from several Mercurial commands.
10 The color extension colorizes output from several Mercurial commands.
11 For example, the diff command shows additions in green and deletions
11 For example, the diff command shows additions in green and deletions
12 in red, while the status command shows modified files in magenta. Many
12 in red, while the status command shows modified files in magenta. Many
13 other commands have analogous colors. It is possible to customize
13 other commands have analogous colors. It is possible to customize
14 these colors.
14 these colors.
15
15
16 Effects
16 Effects
17 -------
17 -------
18
18
19 Other effects in addition to color, like bold and underlined text, are
19 Other effects in addition to color, like bold and underlined text, are
20 also available. By default, the terminfo database is used to find the
20 also available. By default, the terminfo database is used to find the
21 terminal codes used to change color and effect. If terminfo is not
21 terminal codes used to change color and effect. If terminfo is not
22 available, then effects are rendered with the ECMA-48 SGR control
22 available, then effects are rendered with the ECMA-48 SGR control
23 function (aka ANSI escape codes).
23 function (aka ANSI escape codes).
24
24
25 The available effects in terminfo mode are 'blink', 'bold', 'dim',
25 The available effects in terminfo mode are 'blink', 'bold', 'dim',
26 'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
26 'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
27 ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
27 ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
28 'underline'. How each is rendered depends on the terminal emulator.
28 'underline'. How each is rendered depends on the terminal emulator.
29 Some may not be available for a given terminal type, and will be
29 Some may not be available for a given terminal type, and will be
30 silently ignored.
30 silently ignored.
31
31
32 Labels
32 Labels
33 ------
33 ------
34
34
35 Text receives color effects depending on the labels that it has. Many
35 Text receives color effects depending on the labels that it has. Many
36 default Mercurial commands emit labelled text. You can also define
36 default Mercurial commands emit labelled text. You can also define
37 your own labels in templates using the label function, see :hg:`help
37 your own labels in templates using the label function, see :hg:`help
38 templates`. A single portion of text may have more than one label. In
38 templates`. A single portion of text may have more than one label. In
39 that case, effects given to the last label will override any other
39 that case, effects given to the last label will override any other
40 effects. This includes the special "none" effect, which nullifies
40 effects. This includes the special "none" effect, which nullifies
41 other effects.
41 other effects.
42
42
43 Labels are normally invisible. In order to see these labels and their
43 Labels are normally invisible. In order to see these labels and their
44 position in the text, use the global --color=debug option. The same
44 position in the text, use the global --color=debug option. The same
45 anchor text may be associated to multiple labels, e.g.
45 anchor text may be associated to multiple labels, e.g.
46
46
47 [log.changeset changeset.secret|changeset: 22611:6f0a53c8f587]
47 [log.changeset changeset.secret|changeset: 22611:6f0a53c8f587]
48
48
49 The following are the default effects for some default labels. Default
49 The following are the default effects for some default labels. Default
50 effects may be overridden from your configuration file::
50 effects may be overridden from your configuration file::
51
51
52 [color]
52 [color]
53 status.modified = blue bold underline red_background
53 status.modified = blue bold underline red_background
54 status.added = green bold
54 status.added = green bold
55 status.removed = red bold blue_background
55 status.removed = red bold blue_background
56 status.deleted = cyan bold underline
56 status.deleted = cyan bold underline
57 status.unknown = magenta bold underline
57 status.unknown = magenta bold underline
58 status.ignored = black bold
58 status.ignored = black bold
59
59
60 # 'none' turns off all effects
60 # 'none' turns off all effects
61 status.clean = none
61 status.clean = none
62 status.copied = none
62 status.copied = none
63
63
64 qseries.applied = blue bold underline
64 qseries.applied = blue bold underline
65 qseries.unapplied = black bold
65 qseries.unapplied = black bold
66 qseries.missing = red bold
66 qseries.missing = red bold
67
67
68 diff.diffline = bold
68 diff.diffline = bold
69 diff.extended = cyan bold
69 diff.extended = cyan bold
70 diff.file_a = red bold
70 diff.file_a = red bold
71 diff.file_b = green bold
71 diff.file_b = green bold
72 diff.hunk = magenta
72 diff.hunk = magenta
73 diff.deleted = red
73 diff.deleted = red
74 diff.inserted = green
74 diff.inserted = green
75 diff.changed = white
75 diff.changed = white
76 diff.tab =
76 diff.tab =
77 diff.trailingwhitespace = bold red_background
77 diff.trailingwhitespace = bold red_background
78
78
79 # Blank so it inherits the style of the surrounding label
79 # Blank so it inherits the style of the surrounding label
80 changeset.public =
80 changeset.public =
81 changeset.draft =
81 changeset.draft =
82 changeset.secret =
82 changeset.secret =
83
83
84 resolve.unresolved = red bold
84 resolve.unresolved = red bold
85 resolve.resolved = green bold
85 resolve.resolved = green bold
86
86
87 bookmarks.active = green
87 bookmarks.active = green
88
88
89 branches.active = none
89 branches.active = none
90 branches.closed = black bold
90 branches.closed = black bold
91 branches.current = green
91 branches.current = green
92 branches.inactive = none
92 branches.inactive = none
93
93
94 tags.normal = green
94 tags.normal = green
95 tags.local = black bold
95 tags.local = black bold
96
96
97 rebase.rebased = blue
97 rebase.rebased = blue
98 rebase.remaining = red bold
98 rebase.remaining = red bold
99
99
100 shelve.age = cyan
100 shelve.age = cyan
101 shelve.newest = green bold
101 shelve.newest = green bold
102 shelve.name = blue bold
102 shelve.name = blue bold
103
103
104 histedit.remaining = red bold
104 histedit.remaining = red bold
105
105
106 Custom colors
106 Custom colors
107 -------------
107 -------------
108
108
109 Because there are only eight standard colors, this module allows you
109 Because there are only eight standard colors, this module allows you
110 to define color names for other color slots which might be available
110 to define color names for other color slots which might be available
111 for your terminal type, assuming terminfo mode. For instance::
111 for your terminal type, assuming terminfo mode. For instance::
112
112
113 color.brightblue = 12
113 color.brightblue = 12
114 color.pink = 207
114 color.pink = 207
115 color.orange = 202
115 color.orange = 202
116
116
117 to set 'brightblue' to color slot 12 (useful for 16 color terminals
117 to set 'brightblue' to color slot 12 (useful for 16 color terminals
118 that have brighter colors defined in the upper eight) and, 'pink' and
118 that have brighter colors defined in the upper eight) and, 'pink' and
119 'orange' to colors in 256-color xterm's default color cube. These
119 'orange' to colors in 256-color xterm's default color cube. These
120 defined colors may then be used as any of the pre-defined eight,
120 defined colors may then be used as any of the pre-defined eight,
121 including appending '_background' to set the background to that color.
121 including appending '_background' to set the background to that color.
122
122
123 Modes
123 Modes
124 -----
124 -----
125
125
126 By default, the color extension will use ANSI mode (or win32 mode on
126 By default, the color extension will use ANSI mode (or win32 mode on
127 Windows) if it detects a terminal. To override auto mode (to enable
127 Windows) if it detects a terminal. To override auto mode (to enable
128 terminfo mode, for example), set the following configuration option::
128 terminfo mode, for example), set the following configuration option::
129
129
130 [color]
130 [color]
131 mode = terminfo
131 mode = terminfo
132
132
133 Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
133 Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
134 disable color.
134 disable color.
135
135
136 Note that on some systems, terminfo mode may cause problems when using
136 Note that on some systems, terminfo mode may cause problems when using
137 color with the pager extension and less -R. less with the -R option
137 color with the pager extension and less -R. less with the -R option
138 will only display ECMA-48 color codes, and terminfo mode may sometimes
138 will only display ECMA-48 color codes, and terminfo mode may sometimes
139 emit codes that less doesn't understand. You can work around this by
139 emit codes that less doesn't understand. You can work around this by
140 either using ansi mode (or auto mode), or by using less -r (which will
140 either using ansi mode (or auto mode), or by using less -r (which will
141 pass through all terminal control codes, not just color control
141 pass through all terminal control codes, not just color control
142 codes).
142 codes).
143
143
144 On some systems (such as MSYS in Windows), the terminal may support
144 On some systems (such as MSYS in Windows), the terminal may support
145 a different color mode than the pager (activated via the "pager"
145 a different color mode than the pager (activated via the "pager"
146 extension). It is possible to define separate modes depending on whether
146 extension). It is possible to define separate modes depending on whether
147 the pager is active::
147 the pager is active::
148
148
149 [color]
149 [color]
150 mode = auto
150 mode = auto
151 pagermode = ansi
151 pagermode = ansi
152
152
153 If ``pagermode`` is not defined, the ``mode`` will be used.
153 If ``pagermode`` is not defined, the ``mode`` will be used.
154 '''
154 '''
155
155
156 from __future__ import absolute_import
156 from __future__ import absolute_import
157
157
158 import os
158 import os
159
159
160 from mercurial.i18n import _
160 from mercurial.i18n import _
161 from mercurial import (
161 from mercurial import (
162 cmdutil,
162 cmdutil,
163 commands,
163 commands,
164 dispatch,
164 dispatch,
165 extensions,
165 extensions,
166 subrepo,
166 subrepo,
167 ui as uimod,
167 ui as uimod,
168 util,
168 util,
169 )
169 )
170
170
171 cmdtable = {}
171 cmdtable = {}
172 command = cmdutil.command(cmdtable)
172 command = cmdutil.command(cmdtable)
173 # Note for extension authors: ONLY specify testedwith = 'internal' for
173 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
174 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
174 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
175 # be specifying the version(s) of Mercurial they are tested with, or
175 # be specifying the version(s) of Mercurial they are tested with, or
176 # leave the attribute unspecified.
176 # leave the attribute unspecified.
177 testedwith = 'internal'
177 testedwith = 'ships-with-hg-core'
178
178
179 # start and stop parameters for effects
179 # start and stop parameters for effects
180 _effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
180 _effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
181 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
181 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
182 'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2,
182 'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2,
183 'black_background': 40, 'red_background': 41,
183 'black_background': 40, 'red_background': 41,
184 'green_background': 42, 'yellow_background': 43,
184 'green_background': 42, 'yellow_background': 43,
185 'blue_background': 44, 'purple_background': 45,
185 'blue_background': 44, 'purple_background': 45,
186 'cyan_background': 46, 'white_background': 47}
186 'cyan_background': 46, 'white_background': 47}
187
187
188 def _terminfosetup(ui, mode):
188 def _terminfosetup(ui, mode):
189 '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
189 '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
190
190
191 global _terminfo_params
191 global _terminfo_params
192 # If we failed to load curses, we go ahead and return.
192 # If we failed to load curses, we go ahead and return.
193 if not _terminfo_params:
193 if not _terminfo_params:
194 return
194 return
195 # Otherwise, see what the config file says.
195 # Otherwise, see what the config file says.
196 if mode not in ('auto', 'terminfo'):
196 if mode not in ('auto', 'terminfo'):
197 return
197 return
198
198
199 _terminfo_params.update((key[6:], (False, int(val)))
199 _terminfo_params.update((key[6:], (False, int(val)))
200 for key, val in ui.configitems('color')
200 for key, val in ui.configitems('color')
201 if key.startswith('color.'))
201 if key.startswith('color.'))
202
202
203 try:
203 try:
204 curses.setupterm()
204 curses.setupterm()
205 except curses.error as e:
205 except curses.error as e:
206 _terminfo_params = {}
206 _terminfo_params = {}
207 return
207 return
208
208
209 for key, (b, e) in _terminfo_params.items():
209 for key, (b, e) in _terminfo_params.items():
210 if not b:
210 if not b:
211 continue
211 continue
212 if not curses.tigetstr(e):
212 if not curses.tigetstr(e):
213 # Most terminals don't support dim, invis, etc, so don't be
213 # Most terminals don't support dim, invis, etc, so don't be
214 # noisy and use ui.debug().
214 # noisy and use ui.debug().
215 ui.debug("no terminfo entry for %s\n" % e)
215 ui.debug("no terminfo entry for %s\n" % e)
216 del _terminfo_params[key]
216 del _terminfo_params[key]
217 if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
217 if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
218 # Only warn about missing terminfo entries if we explicitly asked for
218 # Only warn about missing terminfo entries if we explicitly asked for
219 # terminfo mode.
219 # terminfo mode.
220 if mode == "terminfo":
220 if mode == "terminfo":
221 ui.warn(_("no terminfo entry for setab/setaf: reverting to "
221 ui.warn(_("no terminfo entry for setab/setaf: reverting to "
222 "ECMA-48 color\n"))
222 "ECMA-48 color\n"))
223 _terminfo_params = {}
223 _terminfo_params = {}
224
224
225 def _modesetup(ui, coloropt):
225 def _modesetup(ui, coloropt):
226 global _terminfo_params
226 global _terminfo_params
227
227
228 if coloropt == 'debug':
228 if coloropt == 'debug':
229 return 'debug'
229 return 'debug'
230
230
231 auto = (coloropt == 'auto')
231 auto = (coloropt == 'auto')
232 always = not auto and util.parsebool(coloropt)
232 always = not auto and util.parsebool(coloropt)
233 if not always and not auto:
233 if not always and not auto:
234 return None
234 return None
235
235
236 formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
236 formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
237
237
238 mode = ui.config('color', 'mode', 'auto')
238 mode = ui.config('color', 'mode', 'auto')
239
239
240 # If pager is active, color.pagermode overrides color.mode.
240 # If pager is active, color.pagermode overrides color.mode.
241 if getattr(ui, 'pageractive', False):
241 if getattr(ui, 'pageractive', False):
242 mode = ui.config('color', 'pagermode', mode)
242 mode = ui.config('color', 'pagermode', mode)
243
243
244 realmode = mode
244 realmode = mode
245 if mode == 'auto':
245 if mode == 'auto':
246 if os.name == 'nt':
246 if os.name == 'nt':
247 term = os.environ.get('TERM')
247 term = os.environ.get('TERM')
248 # TERM won't be defined in a vanilla cmd.exe environment.
248 # TERM won't be defined in a vanilla cmd.exe environment.
249
249
250 # UNIX-like environments on Windows such as Cygwin and MSYS will
250 # UNIX-like environments on Windows such as Cygwin and MSYS will
251 # set TERM. They appear to make a best effort attempt at setting it
251 # set TERM. They appear to make a best effort attempt at setting it
252 # to something appropriate. However, not all environments with TERM
252 # to something appropriate. However, not all environments with TERM
253 # defined support ANSI. Since "ansi" could result in terminal
253 # defined support ANSI. Since "ansi" could result in terminal
254 # gibberish, we error on the side of selecting "win32". However, if
254 # gibberish, we error on the side of selecting "win32". However, if
255 # w32effects is not defined, we almost certainly don't support
255 # w32effects is not defined, we almost certainly don't support
256 # "win32", so don't even try.
256 # "win32", so don't even try.
257 if (term and 'xterm' in term) or not w32effects:
257 if (term and 'xterm' in term) or not w32effects:
258 realmode = 'ansi'
258 realmode = 'ansi'
259 else:
259 else:
260 realmode = 'win32'
260 realmode = 'win32'
261 else:
261 else:
262 realmode = 'ansi'
262 realmode = 'ansi'
263
263
264 def modewarn():
264 def modewarn():
265 # only warn if color.mode was explicitly set and we're in
265 # only warn if color.mode was explicitly set and we're in
266 # an interactive terminal
266 # an interactive terminal
267 if mode == realmode and ui.interactive():
267 if mode == realmode and ui.interactive():
268 ui.warn(_('warning: failed to set color mode to %s\n') % mode)
268 ui.warn(_('warning: failed to set color mode to %s\n') % mode)
269
269
270 if realmode == 'win32':
270 if realmode == 'win32':
271 _terminfo_params = {}
271 _terminfo_params = {}
272 if not w32effects:
272 if not w32effects:
273 modewarn()
273 modewarn()
274 return None
274 return None
275 _effects.update(w32effects)
275 _effects.update(w32effects)
276 elif realmode == 'ansi':
276 elif realmode == 'ansi':
277 _terminfo_params = {}
277 _terminfo_params = {}
278 elif realmode == 'terminfo':
278 elif realmode == 'terminfo':
279 _terminfosetup(ui, mode)
279 _terminfosetup(ui, mode)
280 if not _terminfo_params:
280 if not _terminfo_params:
281 ## FIXME Shouldn't we return None in this case too?
281 ## FIXME Shouldn't we return None in this case too?
282 modewarn()
282 modewarn()
283 realmode = 'ansi'
283 realmode = 'ansi'
284 else:
284 else:
285 return None
285 return None
286
286
287 if always or (auto and formatted):
287 if always or (auto and formatted):
288 return realmode
288 return realmode
289 return None
289 return None
290
290
291 try:
291 try:
292 import curses
292 import curses
293 # Mapping from effect name to terminfo attribute name or color number.
293 # Mapping from effect name to terminfo attribute name or color number.
294 # This will also force-load the curses module.
294 # This will also force-load the curses module.
295 _terminfo_params = {'none': (True, 'sgr0'),
295 _terminfo_params = {'none': (True, 'sgr0'),
296 'standout': (True, 'smso'),
296 'standout': (True, 'smso'),
297 'underline': (True, 'smul'),
297 'underline': (True, 'smul'),
298 'reverse': (True, 'rev'),
298 'reverse': (True, 'rev'),
299 'inverse': (True, 'rev'),
299 'inverse': (True, 'rev'),
300 'blink': (True, 'blink'),
300 'blink': (True, 'blink'),
301 'dim': (True, 'dim'),
301 'dim': (True, 'dim'),
302 'bold': (True, 'bold'),
302 'bold': (True, 'bold'),
303 'invisible': (True, 'invis'),
303 'invisible': (True, 'invis'),
304 'italic': (True, 'sitm'),
304 'italic': (True, 'sitm'),
305 'black': (False, curses.COLOR_BLACK),
305 'black': (False, curses.COLOR_BLACK),
306 'red': (False, curses.COLOR_RED),
306 'red': (False, curses.COLOR_RED),
307 'green': (False, curses.COLOR_GREEN),
307 'green': (False, curses.COLOR_GREEN),
308 'yellow': (False, curses.COLOR_YELLOW),
308 'yellow': (False, curses.COLOR_YELLOW),
309 'blue': (False, curses.COLOR_BLUE),
309 'blue': (False, curses.COLOR_BLUE),
310 'magenta': (False, curses.COLOR_MAGENTA),
310 'magenta': (False, curses.COLOR_MAGENTA),
311 'cyan': (False, curses.COLOR_CYAN),
311 'cyan': (False, curses.COLOR_CYAN),
312 'white': (False, curses.COLOR_WHITE)}
312 'white': (False, curses.COLOR_WHITE)}
313 except ImportError:
313 except ImportError:
314 _terminfo_params = {}
314 _terminfo_params = {}
315
315
316 _styles = {'grep.match': 'red bold',
316 _styles = {'grep.match': 'red bold',
317 'grep.linenumber': 'green',
317 'grep.linenumber': 'green',
318 'grep.rev': 'green',
318 'grep.rev': 'green',
319 'grep.change': 'green',
319 'grep.change': 'green',
320 'grep.sep': 'cyan',
320 'grep.sep': 'cyan',
321 'grep.filename': 'magenta',
321 'grep.filename': 'magenta',
322 'grep.user': 'magenta',
322 'grep.user': 'magenta',
323 'grep.date': 'magenta',
323 'grep.date': 'magenta',
324 'bookmarks.active': 'green',
324 'bookmarks.active': 'green',
325 'branches.active': 'none',
325 'branches.active': 'none',
326 'branches.closed': 'black bold',
326 'branches.closed': 'black bold',
327 'branches.current': 'green',
327 'branches.current': 'green',
328 'branches.inactive': 'none',
328 'branches.inactive': 'none',
329 'diff.changed': 'white',
329 'diff.changed': 'white',
330 'diff.deleted': 'red',
330 'diff.deleted': 'red',
331 'diff.diffline': 'bold',
331 'diff.diffline': 'bold',
332 'diff.extended': 'cyan bold',
332 'diff.extended': 'cyan bold',
333 'diff.file_a': 'red bold',
333 'diff.file_a': 'red bold',
334 'diff.file_b': 'green bold',
334 'diff.file_b': 'green bold',
335 'diff.hunk': 'magenta',
335 'diff.hunk': 'magenta',
336 'diff.inserted': 'green',
336 'diff.inserted': 'green',
337 'diff.tab': '',
337 'diff.tab': '',
338 'diff.trailingwhitespace': 'bold red_background',
338 'diff.trailingwhitespace': 'bold red_background',
339 'changeset.public' : '',
339 'changeset.public' : '',
340 'changeset.draft' : '',
340 'changeset.draft' : '',
341 'changeset.secret' : '',
341 'changeset.secret' : '',
342 'diffstat.deleted': 'red',
342 'diffstat.deleted': 'red',
343 'diffstat.inserted': 'green',
343 'diffstat.inserted': 'green',
344 'histedit.remaining': 'red bold',
344 'histedit.remaining': 'red bold',
345 'ui.prompt': 'yellow',
345 'ui.prompt': 'yellow',
346 'log.changeset': 'yellow',
346 'log.changeset': 'yellow',
347 'patchbomb.finalsummary': '',
347 'patchbomb.finalsummary': '',
348 'patchbomb.from': 'magenta',
348 'patchbomb.from': 'magenta',
349 'patchbomb.to': 'cyan',
349 'patchbomb.to': 'cyan',
350 'patchbomb.subject': 'green',
350 'patchbomb.subject': 'green',
351 'patchbomb.diffstats': '',
351 'patchbomb.diffstats': '',
352 'rebase.rebased': 'blue',
352 'rebase.rebased': 'blue',
353 'rebase.remaining': 'red bold',
353 'rebase.remaining': 'red bold',
354 'resolve.resolved': 'green bold',
354 'resolve.resolved': 'green bold',
355 'resolve.unresolved': 'red bold',
355 'resolve.unresolved': 'red bold',
356 'shelve.age': 'cyan',
356 'shelve.age': 'cyan',
357 'shelve.newest': 'green bold',
357 'shelve.newest': 'green bold',
358 'shelve.name': 'blue bold',
358 'shelve.name': 'blue bold',
359 'status.added': 'green bold',
359 'status.added': 'green bold',
360 'status.clean': 'none',
360 'status.clean': 'none',
361 'status.copied': 'none',
361 'status.copied': 'none',
362 'status.deleted': 'cyan bold underline',
362 'status.deleted': 'cyan bold underline',
363 'status.ignored': 'black bold',
363 'status.ignored': 'black bold',
364 'status.modified': 'blue bold',
364 'status.modified': 'blue bold',
365 'status.removed': 'red bold',
365 'status.removed': 'red bold',
366 'status.unknown': 'magenta bold underline',
366 'status.unknown': 'magenta bold underline',
367 'tags.normal': 'green',
367 'tags.normal': 'green',
368 'tags.local': 'black bold'}
368 'tags.local': 'black bold'}
369
369
370
370
371 def _effect_str(effect):
371 def _effect_str(effect):
372 '''Helper function for render_effects().'''
372 '''Helper function for render_effects().'''
373
373
374 bg = False
374 bg = False
375 if effect.endswith('_background'):
375 if effect.endswith('_background'):
376 bg = True
376 bg = True
377 effect = effect[:-11]
377 effect = effect[:-11]
378 attr, val = _terminfo_params[effect]
378 attr, val = _terminfo_params[effect]
379 if attr:
379 if attr:
380 return curses.tigetstr(val)
380 return curses.tigetstr(val)
381 elif bg:
381 elif bg:
382 return curses.tparm(curses.tigetstr('setab'), val)
382 return curses.tparm(curses.tigetstr('setab'), val)
383 else:
383 else:
384 return curses.tparm(curses.tigetstr('setaf'), val)
384 return curses.tparm(curses.tigetstr('setaf'), val)
385
385
386 def render_effects(text, effects):
386 def render_effects(text, effects):
387 'Wrap text in commands to turn on each effect.'
387 'Wrap text in commands to turn on each effect.'
388 if not text:
388 if not text:
389 return text
389 return text
390 if not _terminfo_params:
390 if not _terminfo_params:
391 start = [str(_effects[e]) for e in ['none'] + effects.split()]
391 start = [str(_effects[e]) for e in ['none'] + effects.split()]
392 start = '\033[' + ';'.join(start) + 'm'
392 start = '\033[' + ';'.join(start) + 'm'
393 stop = '\033[' + str(_effects['none']) + 'm'
393 stop = '\033[' + str(_effects['none']) + 'm'
394 else:
394 else:
395 start = ''.join(_effect_str(effect)
395 start = ''.join(_effect_str(effect)
396 for effect in ['none'] + effects.split())
396 for effect in ['none'] + effects.split())
397 stop = _effect_str('none')
397 stop = _effect_str('none')
398 return ''.join([start, text, stop])
398 return ''.join([start, text, stop])
399
399
400 def extstyles():
400 def extstyles():
401 for name, ext in extensions.extensions():
401 for name, ext in extensions.extensions():
402 _styles.update(getattr(ext, 'colortable', {}))
402 _styles.update(getattr(ext, 'colortable', {}))
403
403
404 def valideffect(effect):
404 def valideffect(effect):
405 'Determine if the effect is valid or not.'
405 'Determine if the effect is valid or not.'
406 good = False
406 good = False
407 if not _terminfo_params and effect in _effects:
407 if not _terminfo_params and effect in _effects:
408 good = True
408 good = True
409 elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
409 elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
410 good = True
410 good = True
411 return good
411 return good
412
412
413 def configstyles(ui):
413 def configstyles(ui):
414 for status, cfgeffects in ui.configitems('color'):
414 for status, cfgeffects in ui.configitems('color'):
415 if '.' not in status or status.startswith('color.'):
415 if '.' not in status or status.startswith('color.'):
416 continue
416 continue
417 cfgeffects = ui.configlist('color', status)
417 cfgeffects = ui.configlist('color', status)
418 if cfgeffects:
418 if cfgeffects:
419 good = []
419 good = []
420 for e in cfgeffects:
420 for e in cfgeffects:
421 if valideffect(e):
421 if valideffect(e):
422 good.append(e)
422 good.append(e)
423 else:
423 else:
424 ui.warn(_("ignoring unknown color/effect %r "
424 ui.warn(_("ignoring unknown color/effect %r "
425 "(configured in color.%s)\n")
425 "(configured in color.%s)\n")
426 % (e, status))
426 % (e, status))
427 _styles[status] = ' '.join(good)
427 _styles[status] = ' '.join(good)
428
428
429 class colorui(uimod.ui):
429 class colorui(uimod.ui):
430 _colormode = 'ansi'
430 _colormode = 'ansi'
431 def write(self, *args, **opts):
431 def write(self, *args, **opts):
432 if self._colormode is None:
432 if self._colormode is None:
433 return super(colorui, self).write(*args, **opts)
433 return super(colorui, self).write(*args, **opts)
434
434
435 label = opts.get('label', '')
435 label = opts.get('label', '')
436 if self._buffers and not opts.get('prompt', False):
436 if self._buffers and not opts.get('prompt', False):
437 if self._bufferapplylabels:
437 if self._bufferapplylabels:
438 self._buffers[-1].extend(self.label(a, label) for a in args)
438 self._buffers[-1].extend(self.label(a, label) for a in args)
439 else:
439 else:
440 self._buffers[-1].extend(args)
440 self._buffers[-1].extend(args)
441 elif self._colormode == 'win32':
441 elif self._colormode == 'win32':
442 for a in args:
442 for a in args:
443 win32print(a, super(colorui, self).write, **opts)
443 win32print(a, super(colorui, self).write, **opts)
444 else:
444 else:
445 return super(colorui, self).write(
445 return super(colorui, self).write(
446 *[self.label(a, label) for a in args], **opts)
446 *[self.label(a, label) for a in args], **opts)
447
447
448 def write_err(self, *args, **opts):
448 def write_err(self, *args, **opts):
449 if self._colormode is None:
449 if self._colormode is None:
450 return super(colorui, self).write_err(*args, **opts)
450 return super(colorui, self).write_err(*args, **opts)
451
451
452 label = opts.get('label', '')
452 label = opts.get('label', '')
453 if self._bufferstates and self._bufferstates[-1][0]:
453 if self._bufferstates and self._bufferstates[-1][0]:
454 return self.write(*args, **opts)
454 return self.write(*args, **opts)
455 if self._colormode == 'win32':
455 if self._colormode == 'win32':
456 for a in args:
456 for a in args:
457 win32print(a, super(colorui, self).write_err, **opts)
457 win32print(a, super(colorui, self).write_err, **opts)
458 else:
458 else:
459 return super(colorui, self).write_err(
459 return super(colorui, self).write_err(
460 *[self.label(a, label) for a in args], **opts)
460 *[self.label(a, label) for a in args], **opts)
461
461
462 def showlabel(self, msg, label):
462 def showlabel(self, msg, label):
463 if label and msg:
463 if label and msg:
464 if msg[-1] == '\n':
464 if msg[-1] == '\n':
465 return "[%s|%s]\n" % (label, msg[:-1])
465 return "[%s|%s]\n" % (label, msg[:-1])
466 else:
466 else:
467 return "[%s|%s]" % (label, msg)
467 return "[%s|%s]" % (label, msg)
468 else:
468 else:
469 return msg
469 return msg
470
470
471 def label(self, msg, label):
471 def label(self, msg, label):
472 if self._colormode is None:
472 if self._colormode is None:
473 return super(colorui, self).label(msg, label)
473 return super(colorui, self).label(msg, label)
474
474
475 if self._colormode == 'debug':
475 if self._colormode == 'debug':
476 return self.showlabel(msg, label)
476 return self.showlabel(msg, label)
477
477
478 effects = []
478 effects = []
479 for l in label.split():
479 for l in label.split():
480 s = _styles.get(l, '')
480 s = _styles.get(l, '')
481 if s:
481 if s:
482 effects.append(s)
482 effects.append(s)
483 elif valideffect(l):
483 elif valideffect(l):
484 effects.append(l)
484 effects.append(l)
485 effects = ' '.join(effects)
485 effects = ' '.join(effects)
486 if effects:
486 if effects:
487 return '\n'.join([render_effects(s, effects)
487 return '\n'.join([render_effects(s, effects)
488 for s in msg.split('\n')])
488 for s in msg.split('\n')])
489 return msg
489 return msg
490
490
491 def uisetup(ui):
491 def uisetup(ui):
492 if ui.plain():
492 if ui.plain():
493 return
493 return
494 if not isinstance(ui, colorui):
494 if not isinstance(ui, colorui):
495 colorui.__bases__ = (ui.__class__,)
495 colorui.__bases__ = (ui.__class__,)
496 ui.__class__ = colorui
496 ui.__class__ = colorui
497 def colorcmd(orig, ui_, opts, cmd, cmdfunc):
497 def colorcmd(orig, ui_, opts, cmd, cmdfunc):
498 mode = _modesetup(ui_, opts['color'])
498 mode = _modesetup(ui_, opts['color'])
499 colorui._colormode = mode
499 colorui._colormode = mode
500 if mode and mode != 'debug':
500 if mode and mode != 'debug':
501 extstyles()
501 extstyles()
502 configstyles(ui_)
502 configstyles(ui_)
503 return orig(ui_, opts, cmd, cmdfunc)
503 return orig(ui_, opts, cmd, cmdfunc)
504 def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
504 def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
505 if gitsub.ui._colormode and len(commands) and commands[0] == "diff":
505 if gitsub.ui._colormode and len(commands) and commands[0] == "diff":
506 # insert the argument in the front,
506 # insert the argument in the front,
507 # the end of git diff arguments is used for paths
507 # the end of git diff arguments is used for paths
508 commands.insert(1, '--color')
508 commands.insert(1, '--color')
509 return orig(gitsub, commands, env, stream, cwd)
509 return orig(gitsub, commands, env, stream, cwd)
510 extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
510 extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
511 extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit)
511 extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit)
512
512
513 def extsetup(ui):
513 def extsetup(ui):
514 commands.globalopts.append(
514 commands.globalopts.append(
515 ('', 'color', 'auto',
515 ('', 'color', 'auto',
516 # i18n: 'always', 'auto', 'never', and 'debug' are keywords
516 # i18n: 'always', 'auto', 'never', and 'debug' are keywords
517 # and should not be translated
517 # and should not be translated
518 _("when to colorize (boolean, always, auto, never, or debug)"),
518 _("when to colorize (boolean, always, auto, never, or debug)"),
519 _('TYPE')))
519 _('TYPE')))
520
520
521 @command('debugcolor', [], 'hg debugcolor')
521 @command('debugcolor', [], 'hg debugcolor')
522 def debugcolor(ui, repo, **opts):
522 def debugcolor(ui, repo, **opts):
523 global _styles
523 global _styles
524 _styles = {}
524 _styles = {}
525 for effect in _effects.keys():
525 for effect in _effects.keys():
526 _styles[effect] = effect
526 _styles[effect] = effect
527 ui.write(('color mode: %s\n') % ui._colormode)
527 ui.write(('color mode: %s\n') % ui._colormode)
528 ui.write(_('available colors:\n'))
528 ui.write(_('available colors:\n'))
529 for label, colors in _styles.items():
529 for label, colors in _styles.items():
530 ui.write(('%s\n') % colors, label=label)
530 ui.write(('%s\n') % colors, label=label)
531
531
532 if os.name != 'nt':
532 if os.name != 'nt':
533 w32effects = None
533 w32effects = None
534 else:
534 else:
535 import ctypes
535 import ctypes
536 import re
536 import re
537
537
538 _kernel32 = ctypes.windll.kernel32
538 _kernel32 = ctypes.windll.kernel32
539
539
540 _WORD = ctypes.c_ushort
540 _WORD = ctypes.c_ushort
541
541
542 _INVALID_HANDLE_VALUE = -1
542 _INVALID_HANDLE_VALUE = -1
543
543
544 class _COORD(ctypes.Structure):
544 class _COORD(ctypes.Structure):
545 _fields_ = [('X', ctypes.c_short),
545 _fields_ = [('X', ctypes.c_short),
546 ('Y', ctypes.c_short)]
546 ('Y', ctypes.c_short)]
547
547
548 class _SMALL_RECT(ctypes.Structure):
548 class _SMALL_RECT(ctypes.Structure):
549 _fields_ = [('Left', ctypes.c_short),
549 _fields_ = [('Left', ctypes.c_short),
550 ('Top', ctypes.c_short),
550 ('Top', ctypes.c_short),
551 ('Right', ctypes.c_short),
551 ('Right', ctypes.c_short),
552 ('Bottom', ctypes.c_short)]
552 ('Bottom', ctypes.c_short)]
553
553
554 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
554 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
555 _fields_ = [('dwSize', _COORD),
555 _fields_ = [('dwSize', _COORD),
556 ('dwCursorPosition', _COORD),
556 ('dwCursorPosition', _COORD),
557 ('wAttributes', _WORD),
557 ('wAttributes', _WORD),
558 ('srWindow', _SMALL_RECT),
558 ('srWindow', _SMALL_RECT),
559 ('dwMaximumWindowSize', _COORD)]
559 ('dwMaximumWindowSize', _COORD)]
560
560
561 _STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
561 _STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
562 _STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
562 _STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
563
563
564 _FOREGROUND_BLUE = 0x0001
564 _FOREGROUND_BLUE = 0x0001
565 _FOREGROUND_GREEN = 0x0002
565 _FOREGROUND_GREEN = 0x0002
566 _FOREGROUND_RED = 0x0004
566 _FOREGROUND_RED = 0x0004
567 _FOREGROUND_INTENSITY = 0x0008
567 _FOREGROUND_INTENSITY = 0x0008
568
568
569 _BACKGROUND_BLUE = 0x0010
569 _BACKGROUND_BLUE = 0x0010
570 _BACKGROUND_GREEN = 0x0020
570 _BACKGROUND_GREEN = 0x0020
571 _BACKGROUND_RED = 0x0040
571 _BACKGROUND_RED = 0x0040
572 _BACKGROUND_INTENSITY = 0x0080
572 _BACKGROUND_INTENSITY = 0x0080
573
573
574 _COMMON_LVB_REVERSE_VIDEO = 0x4000
574 _COMMON_LVB_REVERSE_VIDEO = 0x4000
575 _COMMON_LVB_UNDERSCORE = 0x8000
575 _COMMON_LVB_UNDERSCORE = 0x8000
576
576
577 # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
577 # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
578 w32effects = {
578 w32effects = {
579 'none': -1,
579 'none': -1,
580 'black': 0,
580 'black': 0,
581 'red': _FOREGROUND_RED,
581 'red': _FOREGROUND_RED,
582 'green': _FOREGROUND_GREEN,
582 'green': _FOREGROUND_GREEN,
583 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
583 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
584 'blue': _FOREGROUND_BLUE,
584 'blue': _FOREGROUND_BLUE,
585 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
585 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
586 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
586 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
587 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
587 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
588 'bold': _FOREGROUND_INTENSITY,
588 'bold': _FOREGROUND_INTENSITY,
589 'black_background': 0x100, # unused value > 0x0f
589 'black_background': 0x100, # unused value > 0x0f
590 'red_background': _BACKGROUND_RED,
590 'red_background': _BACKGROUND_RED,
591 'green_background': _BACKGROUND_GREEN,
591 'green_background': _BACKGROUND_GREEN,
592 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
592 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
593 'blue_background': _BACKGROUND_BLUE,
593 'blue_background': _BACKGROUND_BLUE,
594 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
594 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
595 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
595 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
596 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
596 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
597 _BACKGROUND_BLUE),
597 _BACKGROUND_BLUE),
598 'bold_background': _BACKGROUND_INTENSITY,
598 'bold_background': _BACKGROUND_INTENSITY,
599 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
599 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
600 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
600 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
601 }
601 }
602
602
603 passthrough = set([_FOREGROUND_INTENSITY,
603 passthrough = set([_FOREGROUND_INTENSITY,
604 _BACKGROUND_INTENSITY,
604 _BACKGROUND_INTENSITY,
605 _COMMON_LVB_UNDERSCORE,
605 _COMMON_LVB_UNDERSCORE,
606 _COMMON_LVB_REVERSE_VIDEO])
606 _COMMON_LVB_REVERSE_VIDEO])
607
607
608 stdout = _kernel32.GetStdHandle(
608 stdout = _kernel32.GetStdHandle(
609 _STD_OUTPUT_HANDLE) # don't close the handle returned
609 _STD_OUTPUT_HANDLE) # don't close the handle returned
610 if stdout is None or stdout == _INVALID_HANDLE_VALUE:
610 if stdout is None or stdout == _INVALID_HANDLE_VALUE:
611 w32effects = None
611 w32effects = None
612 else:
612 else:
613 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
613 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
614 if not _kernel32.GetConsoleScreenBufferInfo(
614 if not _kernel32.GetConsoleScreenBufferInfo(
615 stdout, ctypes.byref(csbi)):
615 stdout, ctypes.byref(csbi)):
616 # stdout may not support GetConsoleScreenBufferInfo()
616 # stdout may not support GetConsoleScreenBufferInfo()
617 # when called from subprocess or redirected
617 # when called from subprocess or redirected
618 w32effects = None
618 w32effects = None
619 else:
619 else:
620 origattr = csbi.wAttributes
620 origattr = csbi.wAttributes
621 ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
621 ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
622 re.MULTILINE | re.DOTALL)
622 re.MULTILINE | re.DOTALL)
623
623
624 def win32print(text, orig, **opts):
624 def win32print(text, orig, **opts):
625 label = opts.get('label', '')
625 label = opts.get('label', '')
626 attr = origattr
626 attr = origattr
627
627
628 def mapcolor(val, attr):
628 def mapcolor(val, attr):
629 if val == -1:
629 if val == -1:
630 return origattr
630 return origattr
631 elif val in passthrough:
631 elif val in passthrough:
632 return attr | val
632 return attr | val
633 elif val > 0x0f:
633 elif val > 0x0f:
634 return (val & 0x70) | (attr & 0x8f)
634 return (val & 0x70) | (attr & 0x8f)
635 else:
635 else:
636 return (val & 0x07) | (attr & 0xf8)
636 return (val & 0x07) | (attr & 0xf8)
637
637
638 # determine console attributes based on labels
638 # determine console attributes based on labels
639 for l in label.split():
639 for l in label.split():
640 style = _styles.get(l, '')
640 style = _styles.get(l, '')
641 for effect in style.split():
641 for effect in style.split():
642 try:
642 try:
643 attr = mapcolor(w32effects[effect], attr)
643 attr = mapcolor(w32effects[effect], attr)
644 except KeyError:
644 except KeyError:
645 # w32effects could not have certain attributes so we skip
645 # w32effects could not have certain attributes so we skip
646 # them if not found
646 # them if not found
647 pass
647 pass
648 # hack to ensure regexp finds data
648 # hack to ensure regexp finds data
649 if not text.startswith('\033['):
649 if not text.startswith('\033['):
650 text = '\033[m' + text
650 text = '\033[m' + text
651
651
652 # Look for ANSI-like codes embedded in text
652 # Look for ANSI-like codes embedded in text
653 m = re.match(ansire, text)
653 m = re.match(ansire, text)
654
654
655 try:
655 try:
656 while m:
656 while m:
657 for sattr in m.group(1).split(';'):
657 for sattr in m.group(1).split(';'):
658 if sattr:
658 if sattr:
659 attr = mapcolor(int(sattr), attr)
659 attr = mapcolor(int(sattr), attr)
660 _kernel32.SetConsoleTextAttribute(stdout, attr)
660 _kernel32.SetConsoleTextAttribute(stdout, attr)
661 orig(m.group(2), **opts)
661 orig(m.group(2), **opts)
662 m = re.match(ansire, m.group(3))
662 m = re.match(ansire, m.group(3))
663 finally:
663 finally:
664 # Explicitly reset original attributes
664 # Explicitly reset original attributes
665 _kernel32.SetConsoleTextAttribute(stdout, origattr)
665 _kernel32.SetConsoleTextAttribute(stdout, origattr)
@@ -1,458 +1,458 b''
1 # convert.py Foreign SCM converter
1 # convert.py Foreign SCM converter
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''import revisions from foreign VCS repositories into Mercurial'''
8 '''import revisions from foreign VCS repositories into Mercurial'''
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 cmdutil,
14 cmdutil,
15 registrar,
15 registrar,
16 )
16 )
17
17
18 from . import (
18 from . import (
19 convcmd,
19 convcmd,
20 cvsps,
20 cvsps,
21 subversion,
21 subversion,
22 )
22 )
23
23
24 cmdtable = {}
24 cmdtable = {}
25 command = cmdutil.command(cmdtable)
25 command = cmdutil.command(cmdtable)
26 # Note for extension authors: ONLY specify testedwith = 'internal' for
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
28 # be specifying the version(s) of Mercurial they are tested with, or
28 # be specifying the version(s) of Mercurial they are tested with, or
29 # leave the attribute unspecified.
29 # leave the attribute unspecified.
30 testedwith = 'internal'
30 testedwith = 'ships-with-hg-core'
31
31
32 # Commands definition was moved elsewhere to ease demandload job.
32 # Commands definition was moved elsewhere to ease demandload job.
33
33
34 @command('convert',
34 @command('convert',
35 [('', 'authors', '',
35 [('', 'authors', '',
36 _('username mapping filename (DEPRECATED) (use --authormap instead)'),
36 _('username mapping filename (DEPRECATED) (use --authormap instead)'),
37 _('FILE')),
37 _('FILE')),
38 ('s', 'source-type', '', _('source repository type'), _('TYPE')),
38 ('s', 'source-type', '', _('source repository type'), _('TYPE')),
39 ('d', 'dest-type', '', _('destination repository type'), _('TYPE')),
39 ('d', 'dest-type', '', _('destination repository type'), _('TYPE')),
40 ('r', 'rev', [], _('import up to source revision REV'), _('REV')),
40 ('r', 'rev', [], _('import up to source revision REV'), _('REV')),
41 ('A', 'authormap', '', _('remap usernames using this file'), _('FILE')),
41 ('A', 'authormap', '', _('remap usernames using this file'), _('FILE')),
42 ('', 'filemap', '', _('remap file names using contents of file'),
42 ('', 'filemap', '', _('remap file names using contents of file'),
43 _('FILE')),
43 _('FILE')),
44 ('', 'full', None,
44 ('', 'full', None,
45 _('apply filemap changes by converting all files again')),
45 _('apply filemap changes by converting all files again')),
46 ('', 'splicemap', '', _('splice synthesized history into place'),
46 ('', 'splicemap', '', _('splice synthesized history into place'),
47 _('FILE')),
47 _('FILE')),
48 ('', 'branchmap', '', _('change branch names while converting'),
48 ('', 'branchmap', '', _('change branch names while converting'),
49 _('FILE')),
49 _('FILE')),
50 ('', 'branchsort', None, _('try to sort changesets by branches')),
50 ('', 'branchsort', None, _('try to sort changesets by branches')),
51 ('', 'datesort', None, _('try to sort changesets by date')),
51 ('', 'datesort', None, _('try to sort changesets by date')),
52 ('', 'sourcesort', None, _('preserve source changesets order')),
52 ('', 'sourcesort', None, _('preserve source changesets order')),
53 ('', 'closesort', None, _('try to reorder closed revisions'))],
53 ('', 'closesort', None, _('try to reorder closed revisions'))],
54 _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]'),
54 _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]'),
55 norepo=True)
55 norepo=True)
56 def convert(ui, src, dest=None, revmapfile=None, **opts):
56 def convert(ui, src, dest=None, revmapfile=None, **opts):
57 """convert a foreign SCM repository to a Mercurial one.
57 """convert a foreign SCM repository to a Mercurial one.
58
58
59 Accepted source formats [identifiers]:
59 Accepted source formats [identifiers]:
60
60
61 - Mercurial [hg]
61 - Mercurial [hg]
62 - CVS [cvs]
62 - CVS [cvs]
63 - Darcs [darcs]
63 - Darcs [darcs]
64 - git [git]
64 - git [git]
65 - Subversion [svn]
65 - Subversion [svn]
66 - Monotone [mtn]
66 - Monotone [mtn]
67 - GNU Arch [gnuarch]
67 - GNU Arch [gnuarch]
68 - Bazaar [bzr]
68 - Bazaar [bzr]
69 - Perforce [p4]
69 - Perforce [p4]
70
70
71 Accepted destination formats [identifiers]:
71 Accepted destination formats [identifiers]:
72
72
73 - Mercurial [hg]
73 - Mercurial [hg]
74 - Subversion [svn] (history on branches is not preserved)
74 - Subversion [svn] (history on branches is not preserved)
75
75
76 If no revision is given, all revisions will be converted.
76 If no revision is given, all revisions will be converted.
77 Otherwise, convert will only import up to the named revision
77 Otherwise, convert will only import up to the named revision
78 (given in a format understood by the source).
78 (given in a format understood by the source).
79
79
80 If no destination directory name is specified, it defaults to the
80 If no destination directory name is specified, it defaults to the
81 basename of the source with ``-hg`` appended. If the destination
81 basename of the source with ``-hg`` appended. If the destination
82 repository doesn't exist, it will be created.
82 repository doesn't exist, it will be created.
83
83
84 By default, all sources except Mercurial will use --branchsort.
84 By default, all sources except Mercurial will use --branchsort.
85 Mercurial uses --sourcesort to preserve original revision numbers
85 Mercurial uses --sourcesort to preserve original revision numbers
86 order. Sort modes have the following effects:
86 order. Sort modes have the following effects:
87
87
88 --branchsort convert from parent to child revision when possible,
88 --branchsort convert from parent to child revision when possible,
89 which means branches are usually converted one after
89 which means branches are usually converted one after
90 the other. It generates more compact repositories.
90 the other. It generates more compact repositories.
91
91
92 --datesort sort revisions by date. Converted repositories have
92 --datesort sort revisions by date. Converted repositories have
93 good-looking changelogs but are often an order of
93 good-looking changelogs but are often an order of
94 magnitude larger than the same ones generated by
94 magnitude larger than the same ones generated by
95 --branchsort.
95 --branchsort.
96
96
97 --sourcesort try to preserve source revisions order, only
97 --sourcesort try to preserve source revisions order, only
98 supported by Mercurial sources.
98 supported by Mercurial sources.
99
99
100 --closesort try to move closed revisions as close as possible
100 --closesort try to move closed revisions as close as possible
101 to parent branches, only supported by Mercurial
101 to parent branches, only supported by Mercurial
102 sources.
102 sources.
103
103
104 If ``REVMAP`` isn't given, it will be put in a default location
104 If ``REVMAP`` isn't given, it will be put in a default location
105 (``<dest>/.hg/shamap`` by default). The ``REVMAP`` is a simple
105 (``<dest>/.hg/shamap`` by default). The ``REVMAP`` is a simple
106 text file that maps each source commit ID to the destination ID
106 text file that maps each source commit ID to the destination ID
107 for that revision, like so::
107 for that revision, like so::
108
108
109 <source ID> <destination ID>
109 <source ID> <destination ID>
110
110
111 If the file doesn't exist, it's automatically created. It's
111 If the file doesn't exist, it's automatically created. It's
112 updated on each commit copied, so :hg:`convert` can be interrupted
112 updated on each commit copied, so :hg:`convert` can be interrupted
113 and can be run repeatedly to copy new commits.
113 and can be run repeatedly to copy new commits.
114
114
115 The authormap is a simple text file that maps each source commit
115 The authormap is a simple text file that maps each source commit
116 author to a destination commit author. It is handy for source SCMs
116 author to a destination commit author. It is handy for source SCMs
117 that use unix logins to identify authors (e.g.: CVS). One line per
117 that use unix logins to identify authors (e.g.: CVS). One line per
118 author mapping and the line format is::
118 author mapping and the line format is::
119
119
120 source author = destination author
120 source author = destination author
121
121
122 Empty lines and lines starting with a ``#`` are ignored.
122 Empty lines and lines starting with a ``#`` are ignored.
123
123
124 The filemap is a file that allows filtering and remapping of files
124 The filemap is a file that allows filtering and remapping of files
125 and directories. Each line can contain one of the following
125 and directories. Each line can contain one of the following
126 directives::
126 directives::
127
127
128 include path/to/file-or-dir
128 include path/to/file-or-dir
129
129
130 exclude path/to/file-or-dir
130 exclude path/to/file-or-dir
131
131
132 rename path/to/source path/to/destination
132 rename path/to/source path/to/destination
133
133
134 Comment lines start with ``#``. A specified path matches if it
134 Comment lines start with ``#``. A specified path matches if it
135 equals the full relative name of a file or one of its parent
135 equals the full relative name of a file or one of its parent
136 directories. The ``include`` or ``exclude`` directive with the
136 directories. The ``include`` or ``exclude`` directive with the
137 longest matching path applies, so line order does not matter.
137 longest matching path applies, so line order does not matter.
138
138
139 The ``include`` directive causes a file, or all files under a
139 The ``include`` directive causes a file, or all files under a
140 directory, to be included in the destination repository. The default
140 directory, to be included in the destination repository. The default
141 if there are no ``include`` statements is to include everything.
141 if there are no ``include`` statements is to include everything.
142 If there are any ``include`` statements, nothing else is included.
142 If there are any ``include`` statements, nothing else is included.
143 The ``exclude`` directive causes files or directories to
143 The ``exclude`` directive causes files or directories to
144 be omitted. The ``rename`` directive renames a file or directory if
144 be omitted. The ``rename`` directive renames a file or directory if
145 it is converted. To rename from a subdirectory into the root of
145 it is converted. To rename from a subdirectory into the root of
146 the repository, use ``.`` as the path to rename to.
146 the repository, use ``.`` as the path to rename to.
147
147
148 ``--full`` will make sure the converted changesets contain exactly
148 ``--full`` will make sure the converted changesets contain exactly
149 the right files with the right content. It will make a full
149 the right files with the right content. It will make a full
150 conversion of all files, not just the ones that have
150 conversion of all files, not just the ones that have
151 changed. Files that already are correct will not be changed. This
151 changed. Files that already are correct will not be changed. This
152 can be used to apply filemap changes when converting
152 can be used to apply filemap changes when converting
153 incrementally. This is currently only supported for Mercurial and
153 incrementally. This is currently only supported for Mercurial and
154 Subversion.
154 Subversion.
155
155
156 The splicemap is a file that allows insertion of synthetic
156 The splicemap is a file that allows insertion of synthetic
157 history, letting you specify the parents of a revision. This is
157 history, letting you specify the parents of a revision. This is
158 useful if you want to e.g. give a Subversion merge two parents, or
158 useful if you want to e.g. give a Subversion merge two parents, or
159 graft two disconnected series of history together. Each entry
159 graft two disconnected series of history together. Each entry
160 contains a key, followed by a space, followed by one or two
160 contains a key, followed by a space, followed by one or two
161 comma-separated values::
161 comma-separated values::
162
162
163 key parent1, parent2
163 key parent1, parent2
164
164
165 The key is the revision ID in the source
165 The key is the revision ID in the source
166 revision control system whose parents should be modified (same
166 revision control system whose parents should be modified (same
167 format as a key in .hg/shamap). The values are the revision IDs
167 format as a key in .hg/shamap). The values are the revision IDs
168 (in either the source or destination revision control system) that
168 (in either the source or destination revision control system) that
169 should be used as the new parents for that node. For example, if
169 should be used as the new parents for that node. For example, if
170 you have merged "release-1.0" into "trunk", then you should
170 you have merged "release-1.0" into "trunk", then you should
171 specify the revision on "trunk" as the first parent and the one on
171 specify the revision on "trunk" as the first parent and the one on
172 the "release-1.0" branch as the second.
172 the "release-1.0" branch as the second.
173
173
174 The branchmap is a file that allows you to rename a branch when it is
174 The branchmap is a file that allows you to rename a branch when it is
175 being brought in from whatever external repository. When used in
175 being brought in from whatever external repository. When used in
176 conjunction with a splicemap, it allows for a powerful combination
176 conjunction with a splicemap, it allows for a powerful combination
177 to help fix even the most badly mismanaged repositories and turn them
177 to help fix even the most badly mismanaged repositories and turn them
178 into nicely structured Mercurial repositories. The branchmap contains
178 into nicely structured Mercurial repositories. The branchmap contains
179 lines of the form::
179 lines of the form::
180
180
181 original_branch_name new_branch_name
181 original_branch_name new_branch_name
182
182
183 where "original_branch_name" is the name of the branch in the
183 where "original_branch_name" is the name of the branch in the
184 source repository, and "new_branch_name" is the name of the branch
184 source repository, and "new_branch_name" is the name of the branch
185 is the destination repository. No whitespace is allowed in the
185 is the destination repository. No whitespace is allowed in the
186 branch names. This can be used to (for instance) move code in one
186 branch names. This can be used to (for instance) move code in one
187 repository from "default" to a named branch.
187 repository from "default" to a named branch.
188
188
189 Mercurial Source
189 Mercurial Source
190 ################
190 ################
191
191
192 The Mercurial source recognizes the following configuration
192 The Mercurial source recognizes the following configuration
193 options, which you can set on the command line with ``--config``:
193 options, which you can set on the command line with ``--config``:
194
194
195 :convert.hg.ignoreerrors: ignore integrity errors when reading.
195 :convert.hg.ignoreerrors: ignore integrity errors when reading.
196 Use it to fix Mercurial repositories with missing revlogs, by
196 Use it to fix Mercurial repositories with missing revlogs, by
197 converting from and to Mercurial. Default is False.
197 converting from and to Mercurial. Default is False.
198
198
199 :convert.hg.saverev: store original revision ID in changeset
199 :convert.hg.saverev: store original revision ID in changeset
200 (forces target IDs to change). It takes a boolean argument and
200 (forces target IDs to change). It takes a boolean argument and
201 defaults to False.
201 defaults to False.
202
202
203 :convert.hg.startrev: specify the initial Mercurial revision.
203 :convert.hg.startrev: specify the initial Mercurial revision.
204 The default is 0.
204 The default is 0.
205
205
206 :convert.hg.revs: revset specifying the source revisions to convert.
206 :convert.hg.revs: revset specifying the source revisions to convert.
207
207
208 CVS Source
208 CVS Source
209 ##########
209 ##########
210
210
211 CVS source will use a sandbox (i.e. a checked-out copy) from CVS
211 CVS source will use a sandbox (i.e. a checked-out copy) from CVS
212 to indicate the starting point of what will be converted. Direct
212 to indicate the starting point of what will be converted. Direct
213 access to the repository files is not needed, unless of course the
213 access to the repository files is not needed, unless of course the
214 repository is ``:local:``. The conversion uses the top level
214 repository is ``:local:``. The conversion uses the top level
215 directory in the sandbox to find the CVS repository, and then uses
215 directory in the sandbox to find the CVS repository, and then uses
216 CVS rlog commands to find files to convert. This means that unless
216 CVS rlog commands to find files to convert. This means that unless
217 a filemap is given, all files under the starting directory will be
217 a filemap is given, all files under the starting directory will be
218 converted, and that any directory reorganization in the CVS
218 converted, and that any directory reorganization in the CVS
219 sandbox is ignored.
219 sandbox is ignored.
220
220
221 The following options can be used with ``--config``:
221 The following options can be used with ``--config``:
222
222
223 :convert.cvsps.cache: Set to False to disable remote log caching,
223 :convert.cvsps.cache: Set to False to disable remote log caching,
224 for testing and debugging purposes. Default is True.
224 for testing and debugging purposes. Default is True.
225
225
226 :convert.cvsps.fuzz: Specify the maximum time (in seconds) that is
226 :convert.cvsps.fuzz: Specify the maximum time (in seconds) that is
227 allowed between commits with identical user and log message in
227 allowed between commits with identical user and log message in
228 a single changeset. When very large files were checked in as
228 a single changeset. When very large files were checked in as
229 part of a changeset then the default may not be long enough.
229 part of a changeset then the default may not be long enough.
230 The default is 60.
230 The default is 60.
231
231
232 :convert.cvsps.mergeto: Specify a regular expression to which
232 :convert.cvsps.mergeto: Specify a regular expression to which
233 commit log messages are matched. If a match occurs, then the
233 commit log messages are matched. If a match occurs, then the
234 conversion process will insert a dummy revision merging the
234 conversion process will insert a dummy revision merging the
235 branch on which this log message occurs to the branch
235 branch on which this log message occurs to the branch
236 indicated in the regex. Default is ``{{mergetobranch
236 indicated in the regex. Default is ``{{mergetobranch
237 ([-\\w]+)}}``
237 ([-\\w]+)}}``
238
238
239 :convert.cvsps.mergefrom: Specify a regular expression to which
239 :convert.cvsps.mergefrom: Specify a regular expression to which
240 commit log messages are matched. If a match occurs, then the
240 commit log messages are matched. If a match occurs, then the
241 conversion process will add the most recent revision on the
241 conversion process will add the most recent revision on the
242 branch indicated in the regex as the second parent of the
242 branch indicated in the regex as the second parent of the
243 changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
243 changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
244
244
245 :convert.localtimezone: use local time (as determined by the TZ
245 :convert.localtimezone: use local time (as determined by the TZ
246 environment variable) for changeset date/times. The default
246 environment variable) for changeset date/times. The default
247 is False (use UTC).
247 is False (use UTC).
248
248
249 :hooks.cvslog: Specify a Python function to be called at the end of
249 :hooks.cvslog: Specify a Python function to be called at the end of
250 gathering the CVS log. The function is passed a list with the
250 gathering the CVS log. The function is passed a list with the
251 log entries, and can modify the entries in-place, or add or
251 log entries, and can modify the entries in-place, or add or
252 delete them.
252 delete them.
253
253
254 :hooks.cvschangesets: Specify a Python function to be called after
254 :hooks.cvschangesets: Specify a Python function to be called after
255 the changesets are calculated from the CVS log. The
255 the changesets are calculated from the CVS log. The
256 function is passed a list with the changeset entries, and can
256 function is passed a list with the changeset entries, and can
257 modify the changesets in-place, or add or delete them.
257 modify the changesets in-place, or add or delete them.
258
258
259 An additional "debugcvsps" Mercurial command allows the builtin
259 An additional "debugcvsps" Mercurial command allows the builtin
260 changeset merging code to be run without doing a conversion. Its
260 changeset merging code to be run without doing a conversion. Its
261 parameters and output are similar to that of cvsps 2.1. Please see
261 parameters and output are similar to that of cvsps 2.1. Please see
262 the command help for more details.
262 the command help for more details.
263
263
264 Subversion Source
264 Subversion Source
265 #################
265 #################
266
266
267 Subversion source detects classical trunk/branches/tags layouts.
267 Subversion source detects classical trunk/branches/tags layouts.
268 By default, the supplied ``svn://repo/path/`` source URL is
268 By default, the supplied ``svn://repo/path/`` source URL is
269 converted as a single branch. If ``svn://repo/path/trunk`` exists
269 converted as a single branch. If ``svn://repo/path/trunk`` exists
270 it replaces the default branch. If ``svn://repo/path/branches``
270 it replaces the default branch. If ``svn://repo/path/branches``
271 exists, its subdirectories are listed as possible branches. If
271 exists, its subdirectories are listed as possible branches. If
272 ``svn://repo/path/tags`` exists, it is looked for tags referencing
272 ``svn://repo/path/tags`` exists, it is looked for tags referencing
273 converted branches. Default ``trunk``, ``branches`` and ``tags``
273 converted branches. Default ``trunk``, ``branches`` and ``tags``
274 values can be overridden with following options. Set them to paths
274 values can be overridden with following options. Set them to paths
275 relative to the source URL, or leave them blank to disable auto
275 relative to the source URL, or leave them blank to disable auto
276 detection.
276 detection.
277
277
278 The following options can be set with ``--config``:
278 The following options can be set with ``--config``:
279
279
280 :convert.svn.branches: specify the directory containing branches.
280 :convert.svn.branches: specify the directory containing branches.
281 The default is ``branches``.
281 The default is ``branches``.
282
282
283 :convert.svn.tags: specify the directory containing tags. The
283 :convert.svn.tags: specify the directory containing tags. The
284 default is ``tags``.
284 default is ``tags``.
285
285
286 :convert.svn.trunk: specify the name of the trunk branch. The
286 :convert.svn.trunk: specify the name of the trunk branch. The
287 default is ``trunk``.
287 default is ``trunk``.
288
288
289 :convert.localtimezone: use local time (as determined by the TZ
289 :convert.localtimezone: use local time (as determined by the TZ
290 environment variable) for changeset date/times. The default
290 environment variable) for changeset date/times. The default
291 is False (use UTC).
291 is False (use UTC).
292
292
293 Source history can be retrieved starting at a specific revision,
293 Source history can be retrieved starting at a specific revision,
294 instead of being integrally converted. Only single branch
294 instead of being integrally converted. Only single branch
295 conversions are supported.
295 conversions are supported.
296
296
297 :convert.svn.startrev: specify start Subversion revision number.
297 :convert.svn.startrev: specify start Subversion revision number.
298 The default is 0.
298 The default is 0.
299
299
300 Git Source
300 Git Source
301 ##########
301 ##########
302
302
303 The Git importer converts commits from all reachable branches (refs
303 The Git importer converts commits from all reachable branches (refs
304 in refs/heads) and remotes (refs in refs/remotes) to Mercurial.
304 in refs/heads) and remotes (refs in refs/remotes) to Mercurial.
305 Branches are converted to bookmarks with the same name, with the
305 Branches are converted to bookmarks with the same name, with the
306 leading 'refs/heads' stripped. Git submodules are converted to Git
306 leading 'refs/heads' stripped. Git submodules are converted to Git
307 subrepos in Mercurial.
307 subrepos in Mercurial.
308
308
309 The following options can be set with ``--config``:
309 The following options can be set with ``--config``:
310
310
311 :convert.git.similarity: specify how similar files modified in a
311 :convert.git.similarity: specify how similar files modified in a
312 commit must be to be imported as renames or copies, as a
312 commit must be to be imported as renames or copies, as a
313 percentage between ``0`` (disabled) and ``100`` (files must be
313 percentage between ``0`` (disabled) and ``100`` (files must be
314 identical). For example, ``90`` means that a delete/add pair will
314 identical). For example, ``90`` means that a delete/add pair will
315 be imported as a rename if more than 90% of the file hasn't
315 be imported as a rename if more than 90% of the file hasn't
316 changed. The default is ``50``.
316 changed. The default is ``50``.
317
317
318 :convert.git.findcopiesharder: while detecting copies, look at all
318 :convert.git.findcopiesharder: while detecting copies, look at all
319 files in the working copy instead of just changed ones. This
319 files in the working copy instead of just changed ones. This
320 is very expensive for large projects, and is only effective when
320 is very expensive for large projects, and is only effective when
321 ``convert.git.similarity`` is greater than 0. The default is False.
321 ``convert.git.similarity`` is greater than 0. The default is False.
322
322
323 :convert.git.remoteprefix: remote refs are converted as bookmarks with
323 :convert.git.remoteprefix: remote refs are converted as bookmarks with
324 ``convert.git.remoteprefix`` as a prefix followed by a /. The default
324 ``convert.git.remoteprefix`` as a prefix followed by a /. The default
325 is 'remote'.
325 is 'remote'.
326
326
327 :convert.git.skipsubmodules: does not convert root level .gitmodules files
327 :convert.git.skipsubmodules: does not convert root level .gitmodules files
328 or files with 160000 mode indicating a submodule. Default is False.
328 or files with 160000 mode indicating a submodule. Default is False.
329
329
330 Perforce Source
330 Perforce Source
331 ###############
331 ###############
332
332
333 The Perforce (P4) importer can be given a p4 depot path or a
333 The Perforce (P4) importer can be given a p4 depot path or a
334 client specification as source. It will convert all files in the
334 client specification as source. It will convert all files in the
335 source to a flat Mercurial repository, ignoring labels, branches
335 source to a flat Mercurial repository, ignoring labels, branches
336 and integrations. Note that when a depot path is given you then
336 and integrations. Note that when a depot path is given you then
337 usually should specify a target directory, because otherwise the
337 usually should specify a target directory, because otherwise the
338 target may be named ``...-hg``.
338 target may be named ``...-hg``.
339
339
340 The following options can be set with ``--config``:
340 The following options can be set with ``--config``:
341
341
342 :convert.p4.encoding: specify the encoding to use when decoding standard
342 :convert.p4.encoding: specify the encoding to use when decoding standard
343 output of the Perforce command line tool. The default is default system
343 output of the Perforce command line tool. The default is default system
344 encoding.
344 encoding.
345
345
346 :convert.p4.startrev: specify initial Perforce revision (a
346 :convert.p4.startrev: specify initial Perforce revision (a
347 Perforce changelist number).
347 Perforce changelist number).
348
348
349 Mercurial Destination
349 Mercurial Destination
350 #####################
350 #####################
351
351
352 The Mercurial destination will recognize Mercurial subrepositories in the
352 The Mercurial destination will recognize Mercurial subrepositories in the
353 destination directory, and update the .hgsubstate file automatically if the
353 destination directory, and update the .hgsubstate file automatically if the
354 destination subrepositories contain the <dest>/<sub>/.hg/shamap file.
354 destination subrepositories contain the <dest>/<sub>/.hg/shamap file.
355 Converting a repository with subrepositories requires converting a single
355 Converting a repository with subrepositories requires converting a single
356 repository at a time, from the bottom up.
356 repository at a time, from the bottom up.
357
357
358 .. container:: verbose
358 .. container:: verbose
359
359
360 An example showing how to convert a repository with subrepositories::
360 An example showing how to convert a repository with subrepositories::
361
361
362 # so convert knows the type when it sees a non empty destination
362 # so convert knows the type when it sees a non empty destination
363 $ hg init converted
363 $ hg init converted
364
364
365 $ hg convert orig/sub1 converted/sub1
365 $ hg convert orig/sub1 converted/sub1
366 $ hg convert orig/sub2 converted/sub2
366 $ hg convert orig/sub2 converted/sub2
367 $ hg convert orig converted
367 $ hg convert orig converted
368
368
369 The following options are supported:
369 The following options are supported:
370
370
371 :convert.hg.clonebranches: dispatch source branches in separate
371 :convert.hg.clonebranches: dispatch source branches in separate
372 clones. The default is False.
372 clones. The default is False.
373
373
374 :convert.hg.tagsbranch: branch name for tag revisions, defaults to
374 :convert.hg.tagsbranch: branch name for tag revisions, defaults to
375 ``default``.
375 ``default``.
376
376
377 :convert.hg.usebranchnames: preserve branch names. The default is
377 :convert.hg.usebranchnames: preserve branch names. The default is
378 True.
378 True.
379
379
380 :convert.hg.sourcename: records the given string as a 'convert_source' extra
380 :convert.hg.sourcename: records the given string as a 'convert_source' extra
381 value on each commit made in the target repository. The default is None.
381 value on each commit made in the target repository. The default is None.
382
382
383 All Destinations
383 All Destinations
384 ################
384 ################
385
385
386 All destination types accept the following options:
386 All destination types accept the following options:
387
387
388 :convert.skiptags: does not convert tags from the source repo to the target
388 :convert.skiptags: does not convert tags from the source repo to the target
389 repo. The default is False.
389 repo. The default is False.
390 """
390 """
391 return convcmd.convert(ui, src, dest, revmapfile, **opts)
391 return convcmd.convert(ui, src, dest, revmapfile, **opts)
392
392
393 @command('debugsvnlog', [], 'hg debugsvnlog', norepo=True)
393 @command('debugsvnlog', [], 'hg debugsvnlog', norepo=True)
394 def debugsvnlog(ui, **opts):
394 def debugsvnlog(ui, **opts):
395 return subversion.debugsvnlog(ui, **opts)
395 return subversion.debugsvnlog(ui, **opts)
396
396
397 @command('debugcvsps',
397 @command('debugcvsps',
398 [
398 [
399 # Main options shared with cvsps-2.1
399 # Main options shared with cvsps-2.1
400 ('b', 'branches', [], _('only return changes on specified branches')),
400 ('b', 'branches', [], _('only return changes on specified branches')),
401 ('p', 'prefix', '', _('prefix to remove from file names')),
401 ('p', 'prefix', '', _('prefix to remove from file names')),
402 ('r', 'revisions', [],
402 ('r', 'revisions', [],
403 _('only return changes after or between specified tags')),
403 _('only return changes after or between specified tags')),
404 ('u', 'update-cache', None, _("update cvs log cache")),
404 ('u', 'update-cache', None, _("update cvs log cache")),
405 ('x', 'new-cache', None, _("create new cvs log cache")),
405 ('x', 'new-cache', None, _("create new cvs log cache")),
406 ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
406 ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
407 ('', 'root', '', _('specify cvsroot')),
407 ('', 'root', '', _('specify cvsroot')),
408 # Options specific to builtin cvsps
408 # Options specific to builtin cvsps
409 ('', 'parents', '', _('show parent changesets')),
409 ('', 'parents', '', _('show parent changesets')),
410 ('', 'ancestors', '', _('show current changeset in ancestor branches')),
410 ('', 'ancestors', '', _('show current changeset in ancestor branches')),
411 # Options that are ignored for compatibility with cvsps-2.1
411 # Options that are ignored for compatibility with cvsps-2.1
412 ('A', 'cvs-direct', None, _('ignored for compatibility')),
412 ('A', 'cvs-direct', None, _('ignored for compatibility')),
413 ],
413 ],
414 _('hg debugcvsps [OPTION]... [PATH]...'),
414 _('hg debugcvsps [OPTION]... [PATH]...'),
415 norepo=True)
415 norepo=True)
416 def debugcvsps(ui, *args, **opts):
416 def debugcvsps(ui, *args, **opts):
417 '''create changeset information from CVS
417 '''create changeset information from CVS
418
418
419 This command is intended as a debugging tool for the CVS to
419 This command is intended as a debugging tool for the CVS to
420 Mercurial converter, and can be used as a direct replacement for
420 Mercurial converter, and can be used as a direct replacement for
421 cvsps.
421 cvsps.
422
422
423 Hg debugcvsps reads the CVS rlog for current directory (or any
423 Hg debugcvsps reads the CVS rlog for current directory (or any
424 named directory) in the CVS repository, and converts the log to a
424 named directory) in the CVS repository, and converts the log to a
425 series of changesets based on matching commit log entries and
425 series of changesets based on matching commit log entries and
426 dates.'''
426 dates.'''
427 return cvsps.debugcvsps(ui, *args, **opts)
427 return cvsps.debugcvsps(ui, *args, **opts)
428
428
429 def kwconverted(ctx, name):
429 def kwconverted(ctx, name):
430 rev = ctx.extra().get('convert_revision', '')
430 rev = ctx.extra().get('convert_revision', '')
431 if rev.startswith('svn:'):
431 if rev.startswith('svn:'):
432 if name == 'svnrev':
432 if name == 'svnrev':
433 return str(subversion.revsplit(rev)[2])
433 return str(subversion.revsplit(rev)[2])
434 elif name == 'svnpath':
434 elif name == 'svnpath':
435 return subversion.revsplit(rev)[1]
435 return subversion.revsplit(rev)[1]
436 elif name == 'svnuuid':
436 elif name == 'svnuuid':
437 return subversion.revsplit(rev)[0]
437 return subversion.revsplit(rev)[0]
438 return rev
438 return rev
439
439
440 templatekeyword = registrar.templatekeyword()
440 templatekeyword = registrar.templatekeyword()
441
441
442 @templatekeyword('svnrev')
442 @templatekeyword('svnrev')
443 def kwsvnrev(repo, ctx, **args):
443 def kwsvnrev(repo, ctx, **args):
444 """String. Converted subversion revision number."""
444 """String. Converted subversion revision number."""
445 return kwconverted(ctx, 'svnrev')
445 return kwconverted(ctx, 'svnrev')
446
446
447 @templatekeyword('svnpath')
447 @templatekeyword('svnpath')
448 def kwsvnpath(repo, ctx, **args):
448 def kwsvnpath(repo, ctx, **args):
449 """String. Converted subversion revision project path."""
449 """String. Converted subversion revision project path."""
450 return kwconverted(ctx, 'svnpath')
450 return kwconverted(ctx, 'svnpath')
451
451
452 @templatekeyword('svnuuid')
452 @templatekeyword('svnuuid')
453 def kwsvnuuid(repo, ctx, **args):
453 def kwsvnuuid(repo, ctx, **args):
454 """String. Converted subversion revision repository identifier."""
454 """String. Converted subversion revision repository identifier."""
455 return kwconverted(ctx, 'svnuuid')
455 return kwconverted(ctx, 'svnuuid')
456
456
457 # tell hggettext to extract docstrings from these functions:
457 # tell hggettext to extract docstrings from these functions:
458 i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
458 i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
@@ -1,363 +1,363 b''
1 """automatically manage newlines in repository files
1 """automatically manage newlines in repository files
2
2
3 This extension allows you to manage the type of line endings (CRLF or
3 This extension allows you to manage the type of line endings (CRLF or
4 LF) that are used in the repository and in the local working
4 LF) that are used in the repository and in the local working
5 directory. That way you can get CRLF line endings on Windows and LF on
5 directory. That way you can get CRLF line endings on Windows and LF on
6 Unix/Mac, thereby letting everybody use their OS native line endings.
6 Unix/Mac, thereby letting everybody use their OS native line endings.
7
7
8 The extension reads its configuration from a versioned ``.hgeol``
8 The extension reads its configuration from a versioned ``.hgeol``
9 configuration file found in the root of the working directory. The
9 configuration file found in the root of the working directory. The
10 ``.hgeol`` file use the same syntax as all other Mercurial
10 ``.hgeol`` file use the same syntax as all other Mercurial
11 configuration files. It uses two sections, ``[patterns]`` and
11 configuration files. It uses two sections, ``[patterns]`` and
12 ``[repository]``.
12 ``[repository]``.
13
13
14 The ``[patterns]`` section specifies how line endings should be
14 The ``[patterns]`` section specifies how line endings should be
15 converted between the working directory and the repository. The format is
15 converted between the working directory and the repository. The format is
16 specified by a file pattern. The first match is used, so put more
16 specified by a file pattern. The first match is used, so put more
17 specific patterns first. The available line endings are ``LF``,
17 specific patterns first. The available line endings are ``LF``,
18 ``CRLF``, and ``BIN``.
18 ``CRLF``, and ``BIN``.
19
19
20 Files with the declared format of ``CRLF`` or ``LF`` are always
20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 checked out and stored in the repository in that format and files
21 checked out and stored in the repository in that format and files
22 declared to be binary (``BIN``) are left unchanged. Additionally,
22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 ``native`` is an alias for checking out in the platform's default line
23 ``native`` is an alias for checking out in the platform's default line
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 default behavior; it is only needed if you need to override a later,
26 default behavior; it is only needed if you need to override a later,
27 more general pattern.
27 more general pattern.
28
28
29 The optional ``[repository]`` section specifies the line endings to
29 The optional ``[repository]`` section specifies the line endings to
30 use for files stored in the repository. It has a single setting,
30 use for files stored in the repository. It has a single setting,
31 ``native``, which determines the storage line endings for files
31 ``native``, which determines the storage line endings for files
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 will be converted to ``LF`` when stored in the repository. Files
35 will be converted to ``LF`` when stored in the repository. Files
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 are always stored as-is in the repository.
37 are always stored as-is in the repository.
38
38
39 Example versioned ``.hgeol`` file::
39 Example versioned ``.hgeol`` file::
40
40
41 [patterns]
41 [patterns]
42 **.py = native
42 **.py = native
43 **.vcproj = CRLF
43 **.vcproj = CRLF
44 **.txt = native
44 **.txt = native
45 Makefile = LF
45 Makefile = LF
46 **.jpg = BIN
46 **.jpg = BIN
47
47
48 [repository]
48 [repository]
49 native = LF
49 native = LF
50
50
51 .. note::
51 .. note::
52
52
53 The rules will first apply when files are touched in the working
53 The rules will first apply when files are touched in the working
54 directory, e.g. by updating to null and back to tip to touch all files.
54 directory, e.g. by updating to null and back to tip to touch all files.
55
55
56 The extension uses an optional ``[eol]`` section read from both the
56 The extension uses an optional ``[eol]`` section read from both the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 latter overriding the former. You can use that section to control the
58 latter overriding the former. You can use that section to control the
59 overall behavior. There are three settings:
59 overall behavior. There are three settings:
60
60
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 ``CRLF`` to override the default interpretation of ``native`` for
62 ``CRLF`` to override the default interpretation of ``native`` for
63 checkout. This can be used with :hg:`archive` on Unix, say, to
63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 generate an archive where files have line endings for Windows.
64 generate an archive where files have line endings for Windows.
65
65
66 - ``eol.only-consistent`` (default True) can be set to False to make
66 - ``eol.only-consistent`` (default True) can be set to False to make
67 the extension convert files with inconsistent EOLs. Inconsistent
67 the extension convert files with inconsistent EOLs. Inconsistent
68 means that there is both ``CRLF`` and ``LF`` present in the file.
68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 Such files are normally not touched under the assumption that they
69 Such files are normally not touched under the assumption that they
70 have mixed EOLs on purpose.
70 have mixed EOLs on purpose.
71
71
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 ensure that converted files end with a EOL character (either ``\\n``
73 ensure that converted files end with a EOL character (either ``\\n``
74 or ``\\r\\n`` as per the configured patterns).
74 or ``\\r\\n`` as per the configured patterns).
75
75
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 like the deprecated win32text extension does. This means that you can
77 like the deprecated win32text extension does. This means that you can
78 disable win32text and enable eol and your filters will still work. You
78 disable win32text and enable eol and your filters will still work. You
79 only need to these filters until you have prepared a ``.hgeol`` file.
79 only need to these filters until you have prepared a ``.hgeol`` file.
80
80
81 The ``win32text.forbid*`` hooks provided by the win32text extension
81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 have been unified into a single hook named ``eol.checkheadshook``. The
82 have been unified into a single hook named ``eol.checkheadshook``. The
83 hook will lookup the expected line endings from the ``.hgeol`` file,
83 hook will lookup the expected line endings from the ``.hgeol`` file,
84 which means you must migrate to a ``.hgeol`` file first before using
84 which means you must migrate to a ``.hgeol`` file first before using
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
86 invalid revisions will be pushed. To forbid them completely, use the
86 invalid revisions will be pushed. To forbid them completely, use the
87 ``eol.checkallhook`` hook. These hooks are best used as
87 ``eol.checkallhook`` hook. These hooks are best used as
88 ``pretxnchangegroup`` hooks.
88 ``pretxnchangegroup`` hooks.
89
89
90 See :hg:`help patterns` for more information about the glob patterns
90 See :hg:`help patterns` for more information about the glob patterns
91 used.
91 used.
92 """
92 """
93
93
94 from __future__ import absolute_import
94 from __future__ import absolute_import
95
95
96 import os
96 import os
97 import re
97 import re
98 from mercurial.i18n import _
98 from mercurial.i18n import _
99 from mercurial import (
99 from mercurial import (
100 config,
100 config,
101 error,
101 error,
102 extensions,
102 extensions,
103 match,
103 match,
104 util,
104 util,
105 )
105 )
106
106
107 # Note for extension authors: ONLY specify testedwith = 'internal' for
107 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
108 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
108 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
109 # be specifying the version(s) of Mercurial they are tested with, or
109 # be specifying the version(s) of Mercurial they are tested with, or
110 # leave the attribute unspecified.
110 # leave the attribute unspecified.
111 testedwith = 'internal'
111 testedwith = 'ships-with-hg-core'
112
112
113 # Matches a lone LF, i.e., one that is not part of CRLF.
113 # Matches a lone LF, i.e., one that is not part of CRLF.
114 singlelf = re.compile('(^|[^\r])\n')
114 singlelf = re.compile('(^|[^\r])\n')
115 # Matches a single EOL which can either be a CRLF where repeated CR
115 # Matches a single EOL which can either be a CRLF where repeated CR
116 # are removed or a LF. We do not care about old Macintosh files, so a
116 # are removed or a LF. We do not care about old Macintosh files, so a
117 # stray CR is an error.
117 # stray CR is an error.
118 eolre = re.compile('\r*\n')
118 eolre = re.compile('\r*\n')
119
119
120
120
121 def inconsistenteol(data):
121 def inconsistenteol(data):
122 return '\r\n' in data and singlelf.search(data)
122 return '\r\n' in data and singlelf.search(data)
123
123
124 def tolf(s, params, ui, **kwargs):
124 def tolf(s, params, ui, **kwargs):
125 """Filter to convert to LF EOLs."""
125 """Filter to convert to LF EOLs."""
126 if util.binary(s):
126 if util.binary(s):
127 return s
127 return s
128 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
128 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
129 return s
129 return s
130 if (ui.configbool('eol', 'fix-trailing-newline', False)
130 if (ui.configbool('eol', 'fix-trailing-newline', False)
131 and s and s[-1] != '\n'):
131 and s and s[-1] != '\n'):
132 s = s + '\n'
132 s = s + '\n'
133 return eolre.sub('\n', s)
133 return eolre.sub('\n', s)
134
134
135 def tocrlf(s, params, ui, **kwargs):
135 def tocrlf(s, params, ui, **kwargs):
136 """Filter to convert to CRLF EOLs."""
136 """Filter to convert to CRLF EOLs."""
137 if util.binary(s):
137 if util.binary(s):
138 return s
138 return s
139 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
139 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
140 return s
140 return s
141 if (ui.configbool('eol', 'fix-trailing-newline', False)
141 if (ui.configbool('eol', 'fix-trailing-newline', False)
142 and s and s[-1] != '\n'):
142 and s and s[-1] != '\n'):
143 s = s + '\n'
143 s = s + '\n'
144 return eolre.sub('\r\n', s)
144 return eolre.sub('\r\n', s)
145
145
146 def isbinary(s, params):
146 def isbinary(s, params):
147 """Filter to do nothing with the file."""
147 """Filter to do nothing with the file."""
148 return s
148 return s
149
149
150 filters = {
150 filters = {
151 'to-lf': tolf,
151 'to-lf': tolf,
152 'to-crlf': tocrlf,
152 'to-crlf': tocrlf,
153 'is-binary': isbinary,
153 'is-binary': isbinary,
154 # The following provide backwards compatibility with win32text
154 # The following provide backwards compatibility with win32text
155 'cleverencode:': tolf,
155 'cleverencode:': tolf,
156 'cleverdecode:': tocrlf
156 'cleverdecode:': tocrlf
157 }
157 }
158
158
159 class eolfile(object):
159 class eolfile(object):
160 def __init__(self, ui, root, data):
160 def __init__(self, ui, root, data):
161 self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
161 self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
162 self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
162 self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
163
163
164 self.cfg = config.config()
164 self.cfg = config.config()
165 # Our files should not be touched. The pattern must be
165 # Our files should not be touched. The pattern must be
166 # inserted first override a '** = native' pattern.
166 # inserted first override a '** = native' pattern.
167 self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
167 self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
168 # We can then parse the user's patterns.
168 # We can then parse the user's patterns.
169 self.cfg.parse('.hgeol', data)
169 self.cfg.parse('.hgeol', data)
170
170
171 isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
171 isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
172 self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
172 self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
173 iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
173 iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
174 self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
174 self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
175
175
176 include = []
176 include = []
177 exclude = []
177 exclude = []
178 for pattern, style in self.cfg.items('patterns'):
178 for pattern, style in self.cfg.items('patterns'):
179 key = style.upper()
179 key = style.upper()
180 if key == 'BIN':
180 if key == 'BIN':
181 exclude.append(pattern)
181 exclude.append(pattern)
182 else:
182 else:
183 include.append(pattern)
183 include.append(pattern)
184 # This will match the files for which we need to care
184 # This will match the files for which we need to care
185 # about inconsistent newlines.
185 # about inconsistent newlines.
186 self.match = match.match(root, '', [], include, exclude)
186 self.match = match.match(root, '', [], include, exclude)
187
187
188 def copytoui(self, ui):
188 def copytoui(self, ui):
189 for pattern, style in self.cfg.items('patterns'):
189 for pattern, style in self.cfg.items('patterns'):
190 key = style.upper()
190 key = style.upper()
191 try:
191 try:
192 ui.setconfig('decode', pattern, self._decode[key], 'eol')
192 ui.setconfig('decode', pattern, self._decode[key], 'eol')
193 ui.setconfig('encode', pattern, self._encode[key], 'eol')
193 ui.setconfig('encode', pattern, self._encode[key], 'eol')
194 except KeyError:
194 except KeyError:
195 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
195 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
196 % (style, self.cfg.source('patterns', pattern)))
196 % (style, self.cfg.source('patterns', pattern)))
197 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
197 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
198 for k, v in self.cfg.items('eol'):
198 for k, v in self.cfg.items('eol'):
199 ui.setconfig('eol', k, v, 'eol')
199 ui.setconfig('eol', k, v, 'eol')
200
200
201 def checkrev(self, repo, ctx, files):
201 def checkrev(self, repo, ctx, files):
202 failed = []
202 failed = []
203 for f in (files or ctx.files()):
203 for f in (files or ctx.files()):
204 if f not in ctx:
204 if f not in ctx:
205 continue
205 continue
206 for pattern, style in self.cfg.items('patterns'):
206 for pattern, style in self.cfg.items('patterns'):
207 if not match.match(repo.root, '', [pattern])(f):
207 if not match.match(repo.root, '', [pattern])(f):
208 continue
208 continue
209 target = self._encode[style.upper()]
209 target = self._encode[style.upper()]
210 data = ctx[f].data()
210 data = ctx[f].data()
211 if (target == "to-lf" and "\r\n" in data
211 if (target == "to-lf" and "\r\n" in data
212 or target == "to-crlf" and singlelf.search(data)):
212 or target == "to-crlf" and singlelf.search(data)):
213 failed.append((f, target, str(ctx)))
213 failed.append((f, target, str(ctx)))
214 break
214 break
215 return failed
215 return failed
216
216
217 def parseeol(ui, repo, nodes):
217 def parseeol(ui, repo, nodes):
218 try:
218 try:
219 for node in nodes:
219 for node in nodes:
220 try:
220 try:
221 if node is None:
221 if node is None:
222 # Cannot use workingctx.data() since it would load
222 # Cannot use workingctx.data() since it would load
223 # and cache the filters before we configure them.
223 # and cache the filters before we configure them.
224 data = repo.wfile('.hgeol').read()
224 data = repo.wfile('.hgeol').read()
225 else:
225 else:
226 data = repo[node]['.hgeol'].data()
226 data = repo[node]['.hgeol'].data()
227 return eolfile(ui, repo.root, data)
227 return eolfile(ui, repo.root, data)
228 except (IOError, LookupError):
228 except (IOError, LookupError):
229 pass
229 pass
230 except error.ParseError as inst:
230 except error.ParseError as inst:
231 ui.warn(_("warning: ignoring .hgeol file due to parse error "
231 ui.warn(_("warning: ignoring .hgeol file due to parse error "
232 "at %s: %s\n") % (inst.args[1], inst.args[0]))
232 "at %s: %s\n") % (inst.args[1], inst.args[0]))
233 return None
233 return None
234
234
235 def _checkhook(ui, repo, node, headsonly):
235 def _checkhook(ui, repo, node, headsonly):
236 # Get revisions to check and touched files at the same time
236 # Get revisions to check and touched files at the same time
237 files = set()
237 files = set()
238 revs = set()
238 revs = set()
239 for rev in xrange(repo[node].rev(), len(repo)):
239 for rev in xrange(repo[node].rev(), len(repo)):
240 revs.add(rev)
240 revs.add(rev)
241 if headsonly:
241 if headsonly:
242 ctx = repo[rev]
242 ctx = repo[rev]
243 files.update(ctx.files())
243 files.update(ctx.files())
244 for pctx in ctx.parents():
244 for pctx in ctx.parents():
245 revs.discard(pctx.rev())
245 revs.discard(pctx.rev())
246 failed = []
246 failed = []
247 for rev in revs:
247 for rev in revs:
248 ctx = repo[rev]
248 ctx = repo[rev]
249 eol = parseeol(ui, repo, [ctx.node()])
249 eol = parseeol(ui, repo, [ctx.node()])
250 if eol:
250 if eol:
251 failed.extend(eol.checkrev(repo, ctx, files))
251 failed.extend(eol.checkrev(repo, ctx, files))
252
252
253 if failed:
253 if failed:
254 eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
254 eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
255 msgs = []
255 msgs = []
256 for f, target, node in sorted(failed):
256 for f, target, node in sorted(failed):
257 msgs.append(_(" %s in %s should not have %s line endings") %
257 msgs.append(_(" %s in %s should not have %s line endings") %
258 (f, node, eols[target]))
258 (f, node, eols[target]))
259 raise error.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
259 raise error.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
260
260
261 def checkallhook(ui, repo, node, hooktype, **kwargs):
261 def checkallhook(ui, repo, node, hooktype, **kwargs):
262 """verify that files have expected EOLs"""
262 """verify that files have expected EOLs"""
263 _checkhook(ui, repo, node, False)
263 _checkhook(ui, repo, node, False)
264
264
265 def checkheadshook(ui, repo, node, hooktype, **kwargs):
265 def checkheadshook(ui, repo, node, hooktype, **kwargs):
266 """verify that files have expected EOLs"""
266 """verify that files have expected EOLs"""
267 _checkhook(ui, repo, node, True)
267 _checkhook(ui, repo, node, True)
268
268
269 # "checkheadshook" used to be called "hook"
269 # "checkheadshook" used to be called "hook"
270 hook = checkheadshook
270 hook = checkheadshook
271
271
272 def preupdate(ui, repo, hooktype, parent1, parent2):
272 def preupdate(ui, repo, hooktype, parent1, parent2):
273 repo.loadeol([parent1])
273 repo.loadeol([parent1])
274 return False
274 return False
275
275
276 def uisetup(ui):
276 def uisetup(ui):
277 ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
277 ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
278
278
279 def extsetup(ui):
279 def extsetup(ui):
280 try:
280 try:
281 extensions.find('win32text')
281 extensions.find('win32text')
282 ui.warn(_("the eol extension is incompatible with the "
282 ui.warn(_("the eol extension is incompatible with the "
283 "win32text extension\n"))
283 "win32text extension\n"))
284 except KeyError:
284 except KeyError:
285 pass
285 pass
286
286
287
287
288 def reposetup(ui, repo):
288 def reposetup(ui, repo):
289 uisetup(repo.ui)
289 uisetup(repo.ui)
290
290
291 if not repo.local():
291 if not repo.local():
292 return
292 return
293 for name, fn in filters.iteritems():
293 for name, fn in filters.iteritems():
294 repo.adddatafilter(name, fn)
294 repo.adddatafilter(name, fn)
295
295
296 ui.setconfig('patch', 'eol', 'auto', 'eol')
296 ui.setconfig('patch', 'eol', 'auto', 'eol')
297
297
298 class eolrepo(repo.__class__):
298 class eolrepo(repo.__class__):
299
299
300 def loadeol(self, nodes):
300 def loadeol(self, nodes):
301 eol = parseeol(self.ui, self, nodes)
301 eol = parseeol(self.ui, self, nodes)
302 if eol is None:
302 if eol is None:
303 return None
303 return None
304 eol.copytoui(self.ui)
304 eol.copytoui(self.ui)
305 return eol.match
305 return eol.match
306
306
307 def _hgcleardirstate(self):
307 def _hgcleardirstate(self):
308 self._eolfile = self.loadeol([None, 'tip'])
308 self._eolfile = self.loadeol([None, 'tip'])
309 if not self._eolfile:
309 if not self._eolfile:
310 self._eolfile = util.never
310 self._eolfile = util.never
311 return
311 return
312
312
313 try:
313 try:
314 cachemtime = os.path.getmtime(self.join("eol.cache"))
314 cachemtime = os.path.getmtime(self.join("eol.cache"))
315 except OSError:
315 except OSError:
316 cachemtime = 0
316 cachemtime = 0
317
317
318 try:
318 try:
319 eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
319 eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
320 except OSError:
320 except OSError:
321 eolmtime = 0
321 eolmtime = 0
322
322
323 if eolmtime > cachemtime:
323 if eolmtime > cachemtime:
324 self.ui.debug("eol: detected change in .hgeol\n")
324 self.ui.debug("eol: detected change in .hgeol\n")
325 wlock = None
325 wlock = None
326 try:
326 try:
327 wlock = self.wlock()
327 wlock = self.wlock()
328 for f in self.dirstate:
328 for f in self.dirstate:
329 if self.dirstate[f] == 'n':
329 if self.dirstate[f] == 'n':
330 # all normal files need to be looked at
330 # all normal files need to be looked at
331 # again since the new .hgeol file might no
331 # again since the new .hgeol file might no
332 # longer match a file it matched before
332 # longer match a file it matched before
333 self.dirstate.normallookup(f)
333 self.dirstate.normallookup(f)
334 # Create or touch the cache to update mtime
334 # Create or touch the cache to update mtime
335 self.vfs("eol.cache", "w").close()
335 self.vfs("eol.cache", "w").close()
336 wlock.release()
336 wlock.release()
337 except error.LockUnavailable:
337 except error.LockUnavailable:
338 # If we cannot lock the repository and clear the
338 # If we cannot lock the repository and clear the
339 # dirstate, then a commit might not see all files
339 # dirstate, then a commit might not see all files
340 # as modified. But if we cannot lock the
340 # as modified. But if we cannot lock the
341 # repository, then we can also not make a commit,
341 # repository, then we can also not make a commit,
342 # so ignore the error.
342 # so ignore the error.
343 pass
343 pass
344
344
345 def commitctx(self, ctx, haserror=False):
345 def commitctx(self, ctx, haserror=False):
346 for f in sorted(ctx.added() + ctx.modified()):
346 for f in sorted(ctx.added() + ctx.modified()):
347 if not self._eolfile(f):
347 if not self._eolfile(f):
348 continue
348 continue
349 fctx = ctx[f]
349 fctx = ctx[f]
350 if fctx is None:
350 if fctx is None:
351 continue
351 continue
352 data = fctx.data()
352 data = fctx.data()
353 if util.binary(data):
353 if util.binary(data):
354 # We should not abort here, since the user should
354 # We should not abort here, since the user should
355 # be able to say "** = native" to automatically
355 # be able to say "** = native" to automatically
356 # have all non-binary files taken care of.
356 # have all non-binary files taken care of.
357 continue
357 continue
358 if inconsistenteol(data):
358 if inconsistenteol(data):
359 raise error.Abort(_("inconsistent newline style "
359 raise error.Abort(_("inconsistent newline style "
360 "in %s\n") % f)
360 "in %s\n") % f)
361 return super(eolrepo, self).commitctx(ctx, haserror)
361 return super(eolrepo, self).commitctx(ctx, haserror)
362 repo.__class__ = eolrepo
362 repo.__class__ = eolrepo
363 repo._hgcleardirstate()
363 repo._hgcleardirstate()
@@ -1,392 +1,392 b''
1 # extdiff.py - external diff program support for mercurial
1 # extdiff.py - external diff program support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to allow external programs to compare revisions
8 '''command to allow external programs to compare revisions
9
9
10 The extdiff Mercurial extension allows you to use external programs
10 The extdiff Mercurial extension allows you to use external programs
11 to compare revisions, or revision with working directory. The external
11 to compare revisions, or revision with working directory. The external
12 diff programs are called with a configurable set of options and two
12 diff programs are called with a configurable set of options and two
13 non-option arguments: paths to directories containing snapshots of
13 non-option arguments: paths to directories containing snapshots of
14 files to compare.
14 files to compare.
15
15
16 The extdiff extension also allows you to configure new diff commands, so
16 The extdiff extension also allows you to configure new diff commands, so
17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
18
18
19 [extdiff]
19 [extdiff]
20 # add new command that runs GNU diff(1) in 'context diff' mode
20 # add new command that runs GNU diff(1) in 'context diff' mode
21 cdiff = gdiff -Nprc5
21 cdiff = gdiff -Nprc5
22 ## or the old way:
22 ## or the old way:
23 #cmd.cdiff = gdiff
23 #cmd.cdiff = gdiff
24 #opts.cdiff = -Nprc5
24 #opts.cdiff = -Nprc5
25
25
26 # add new command called meld, runs meld (no need to name twice). If
26 # add new command called meld, runs meld (no need to name twice). If
27 # the meld executable is not available, the meld tool in [merge-tools]
27 # the meld executable is not available, the meld tool in [merge-tools]
28 # will be used, if available
28 # will be used, if available
29 meld =
29 meld =
30
30
31 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
31 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
32 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
32 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
33 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
33 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
34 # your .vimrc
34 # your .vimrc
35 vimdiff = gvim -f "+next" \\
35 vimdiff = gvim -f "+next" \\
36 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
36 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
37
37
38 Tool arguments can include variables that are expanded at runtime::
38 Tool arguments can include variables that are expanded at runtime::
39
39
40 $parent1, $plabel1 - filename, descriptive label of first parent
40 $parent1, $plabel1 - filename, descriptive label of first parent
41 $child, $clabel - filename, descriptive label of child revision
41 $child, $clabel - filename, descriptive label of child revision
42 $parent2, $plabel2 - filename, descriptive label of second parent
42 $parent2, $plabel2 - filename, descriptive label of second parent
43 $root - repository root
43 $root - repository root
44 $parent is an alias for $parent1.
44 $parent is an alias for $parent1.
45
45
46 The extdiff extension will look in your [diff-tools] and [merge-tools]
46 The extdiff extension will look in your [diff-tools] and [merge-tools]
47 sections for diff tool arguments, when none are specified in [extdiff].
47 sections for diff tool arguments, when none are specified in [extdiff].
48
48
49 ::
49 ::
50
50
51 [extdiff]
51 [extdiff]
52 kdiff3 =
52 kdiff3 =
53
53
54 [diff-tools]
54 [diff-tools]
55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
56
56
57 You can use -I/-X and list of file or directory names like normal
57 You can use -I/-X and list of file or directory names like normal
58 :hg:`diff` command. The extdiff extension makes snapshots of only
58 :hg:`diff` command. The extdiff extension makes snapshots of only
59 needed files, so running the external diff program will actually be
59 needed files, so running the external diff program will actually be
60 pretty fast (at least faster than having to compare the entire tree).
60 pretty fast (at least faster than having to compare the entire tree).
61 '''
61 '''
62
62
63 from __future__ import absolute_import
63 from __future__ import absolute_import
64
64
65 import os
65 import os
66 import re
66 import re
67 import shlex
67 import shlex
68 import shutil
68 import shutil
69 import tempfile
69 import tempfile
70 from mercurial.i18n import _
70 from mercurial.i18n import _
71 from mercurial.node import (
71 from mercurial.node import (
72 nullid,
72 nullid,
73 short,
73 short,
74 )
74 )
75 from mercurial import (
75 from mercurial import (
76 archival,
76 archival,
77 cmdutil,
77 cmdutil,
78 commands,
78 commands,
79 error,
79 error,
80 filemerge,
80 filemerge,
81 scmutil,
81 scmutil,
82 util,
82 util,
83 )
83 )
84
84
85 cmdtable = {}
85 cmdtable = {}
86 command = cmdutil.command(cmdtable)
86 command = cmdutil.command(cmdtable)
87 # Note for extension authors: ONLY specify testedwith = 'internal' for
87 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
88 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
88 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
89 # be specifying the version(s) of Mercurial they are tested with, or
89 # be specifying the version(s) of Mercurial they are tested with, or
90 # leave the attribute unspecified.
90 # leave the attribute unspecified.
91 testedwith = 'internal'
91 testedwith = 'ships-with-hg-core'
92
92
93 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
93 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
94 '''snapshot files as of some revision
94 '''snapshot files as of some revision
95 if not using snapshot, -I/-X does not work and recursive diff
95 if not using snapshot, -I/-X does not work and recursive diff
96 in tools like kdiff3 and meld displays too many files.'''
96 in tools like kdiff3 and meld displays too many files.'''
97 dirname = os.path.basename(repo.root)
97 dirname = os.path.basename(repo.root)
98 if dirname == "":
98 if dirname == "":
99 dirname = "root"
99 dirname = "root"
100 if node is not None:
100 if node is not None:
101 dirname = '%s.%s' % (dirname, short(node))
101 dirname = '%s.%s' % (dirname, short(node))
102 base = os.path.join(tmproot, dirname)
102 base = os.path.join(tmproot, dirname)
103 os.mkdir(base)
103 os.mkdir(base)
104 fns_and_mtime = []
104 fns_and_mtime = []
105
105
106 if node is not None:
106 if node is not None:
107 ui.note(_('making snapshot of %d files from rev %s\n') %
107 ui.note(_('making snapshot of %d files from rev %s\n') %
108 (len(files), short(node)))
108 (len(files), short(node)))
109 else:
109 else:
110 ui.note(_('making snapshot of %d files from working directory\n') %
110 ui.note(_('making snapshot of %d files from working directory\n') %
111 (len(files)))
111 (len(files)))
112
112
113 if files:
113 if files:
114 repo.ui.setconfig("ui", "archivemeta", False)
114 repo.ui.setconfig("ui", "archivemeta", False)
115
115
116 archival.archive(repo, base, node, 'files',
116 archival.archive(repo, base, node, 'files',
117 matchfn=scmutil.matchfiles(repo, files),
117 matchfn=scmutil.matchfiles(repo, files),
118 subrepos=listsubrepos)
118 subrepos=listsubrepos)
119
119
120 for fn in sorted(files):
120 for fn in sorted(files):
121 wfn = util.pconvert(fn)
121 wfn = util.pconvert(fn)
122 ui.note(' %s\n' % wfn)
122 ui.note(' %s\n' % wfn)
123
123
124 if node is None:
124 if node is None:
125 dest = os.path.join(base, wfn)
125 dest = os.path.join(base, wfn)
126
126
127 fns_and_mtime.append((dest, repo.wjoin(fn),
127 fns_and_mtime.append((dest, repo.wjoin(fn),
128 os.lstat(dest).st_mtime))
128 os.lstat(dest).st_mtime))
129 return dirname, fns_and_mtime
129 return dirname, fns_and_mtime
130
130
131 def dodiff(ui, repo, cmdline, pats, opts):
131 def dodiff(ui, repo, cmdline, pats, opts):
132 '''Do the actual diff:
132 '''Do the actual diff:
133
133
134 - copy to a temp structure if diffing 2 internal revisions
134 - copy to a temp structure if diffing 2 internal revisions
135 - copy to a temp structure if diffing working revision with
135 - copy to a temp structure if diffing working revision with
136 another one and more than 1 file is changed
136 another one and more than 1 file is changed
137 - just invoke the diff for a single file in the working dir
137 - just invoke the diff for a single file in the working dir
138 '''
138 '''
139
139
140 revs = opts.get('rev')
140 revs = opts.get('rev')
141 change = opts.get('change')
141 change = opts.get('change')
142 do3way = '$parent2' in cmdline
142 do3way = '$parent2' in cmdline
143
143
144 if revs and change:
144 if revs and change:
145 msg = _('cannot specify --rev and --change at the same time')
145 msg = _('cannot specify --rev and --change at the same time')
146 raise error.Abort(msg)
146 raise error.Abort(msg)
147 elif change:
147 elif change:
148 node2 = scmutil.revsingle(repo, change, None).node()
148 node2 = scmutil.revsingle(repo, change, None).node()
149 node1a, node1b = repo.changelog.parents(node2)
149 node1a, node1b = repo.changelog.parents(node2)
150 else:
150 else:
151 node1a, node2 = scmutil.revpair(repo, revs)
151 node1a, node2 = scmutil.revpair(repo, revs)
152 if not revs:
152 if not revs:
153 node1b = repo.dirstate.p2()
153 node1b = repo.dirstate.p2()
154 else:
154 else:
155 node1b = nullid
155 node1b = nullid
156
156
157 # Disable 3-way merge if there is only one parent
157 # Disable 3-way merge if there is only one parent
158 if do3way:
158 if do3way:
159 if node1b == nullid:
159 if node1b == nullid:
160 do3way = False
160 do3way = False
161
161
162 subrepos=opts.get('subrepos')
162 subrepos=opts.get('subrepos')
163
163
164 matcher = scmutil.match(repo[node2], pats, opts)
164 matcher = scmutil.match(repo[node2], pats, opts)
165
165
166 if opts.get('patch'):
166 if opts.get('patch'):
167 if subrepos:
167 if subrepos:
168 raise error.Abort(_('--patch cannot be used with --subrepos'))
168 raise error.Abort(_('--patch cannot be used with --subrepos'))
169 if node2 is None:
169 if node2 is None:
170 raise error.Abort(_('--patch requires two revisions'))
170 raise error.Abort(_('--patch requires two revisions'))
171 else:
171 else:
172 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
172 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
173 listsubrepos=subrepos)[:3])
173 listsubrepos=subrepos)[:3])
174 if do3way:
174 if do3way:
175 mod_b, add_b, rem_b = map(set,
175 mod_b, add_b, rem_b = map(set,
176 repo.status(node1b, node2, matcher,
176 repo.status(node1b, node2, matcher,
177 listsubrepos=subrepos)[:3])
177 listsubrepos=subrepos)[:3])
178 else:
178 else:
179 mod_b, add_b, rem_b = set(), set(), set()
179 mod_b, add_b, rem_b = set(), set(), set()
180 modadd = mod_a | add_a | mod_b | add_b
180 modadd = mod_a | add_a | mod_b | add_b
181 common = modadd | rem_a | rem_b
181 common = modadd | rem_a | rem_b
182 if not common:
182 if not common:
183 return 0
183 return 0
184
184
185 tmproot = tempfile.mkdtemp(prefix='extdiff.')
185 tmproot = tempfile.mkdtemp(prefix='extdiff.')
186 try:
186 try:
187 if not opts.get('patch'):
187 if not opts.get('patch'):
188 # Always make a copy of node1a (and node1b, if applicable)
188 # Always make a copy of node1a (and node1b, if applicable)
189 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
189 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
190 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
190 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
191 subrepos)[0]
191 subrepos)[0]
192 rev1a = '@%d' % repo[node1a].rev()
192 rev1a = '@%d' % repo[node1a].rev()
193 if do3way:
193 if do3way:
194 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
194 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
195 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
195 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
196 subrepos)[0]
196 subrepos)[0]
197 rev1b = '@%d' % repo[node1b].rev()
197 rev1b = '@%d' % repo[node1b].rev()
198 else:
198 else:
199 dir1b = None
199 dir1b = None
200 rev1b = ''
200 rev1b = ''
201
201
202 fns_and_mtime = []
202 fns_and_mtime = []
203
203
204 # If node2 in not the wc or there is >1 change, copy it
204 # If node2 in not the wc or there is >1 change, copy it
205 dir2root = ''
205 dir2root = ''
206 rev2 = ''
206 rev2 = ''
207 if node2:
207 if node2:
208 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
208 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
209 rev2 = '@%d' % repo[node2].rev()
209 rev2 = '@%d' % repo[node2].rev()
210 elif len(common) > 1:
210 elif len(common) > 1:
211 #we only actually need to get the files to copy back to
211 #we only actually need to get the files to copy back to
212 #the working dir in this case (because the other cases
212 #the working dir in this case (because the other cases
213 #are: diffing 2 revisions or single file -- in which case
213 #are: diffing 2 revisions or single file -- in which case
214 #the file is already directly passed to the diff tool).
214 #the file is already directly passed to the diff tool).
215 dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot,
215 dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot,
216 subrepos)
216 subrepos)
217 else:
217 else:
218 # This lets the diff tool open the changed file directly
218 # This lets the diff tool open the changed file directly
219 dir2 = ''
219 dir2 = ''
220 dir2root = repo.root
220 dir2root = repo.root
221
221
222 label1a = rev1a
222 label1a = rev1a
223 label1b = rev1b
223 label1b = rev1b
224 label2 = rev2
224 label2 = rev2
225
225
226 # If only one change, diff the files instead of the directories
226 # If only one change, diff the files instead of the directories
227 # Handle bogus modifies correctly by checking if the files exist
227 # Handle bogus modifies correctly by checking if the files exist
228 if len(common) == 1:
228 if len(common) == 1:
229 common_file = util.localpath(common.pop())
229 common_file = util.localpath(common.pop())
230 dir1a = os.path.join(tmproot, dir1a, common_file)
230 dir1a = os.path.join(tmproot, dir1a, common_file)
231 label1a = common_file + rev1a
231 label1a = common_file + rev1a
232 if not os.path.isfile(dir1a):
232 if not os.path.isfile(dir1a):
233 dir1a = os.devnull
233 dir1a = os.devnull
234 if do3way:
234 if do3way:
235 dir1b = os.path.join(tmproot, dir1b, common_file)
235 dir1b = os.path.join(tmproot, dir1b, common_file)
236 label1b = common_file + rev1b
236 label1b = common_file + rev1b
237 if not os.path.isfile(dir1b):
237 if not os.path.isfile(dir1b):
238 dir1b = os.devnull
238 dir1b = os.devnull
239 dir2 = os.path.join(dir2root, dir2, common_file)
239 dir2 = os.path.join(dir2root, dir2, common_file)
240 label2 = common_file + rev2
240 label2 = common_file + rev2
241 else:
241 else:
242 template = 'hg-%h.patch'
242 template = 'hg-%h.patch'
243 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
243 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
244 template=repo.vfs.reljoin(tmproot, template),
244 template=repo.vfs.reljoin(tmproot, template),
245 match=matcher)
245 match=matcher)
246 label1a = cmdutil.makefilename(repo, template, node1a)
246 label1a = cmdutil.makefilename(repo, template, node1a)
247 label2 = cmdutil.makefilename(repo, template, node2)
247 label2 = cmdutil.makefilename(repo, template, node2)
248 dir1a = repo.vfs.reljoin(tmproot, label1a)
248 dir1a = repo.vfs.reljoin(tmproot, label1a)
249 dir2 = repo.vfs.reljoin(tmproot, label2)
249 dir2 = repo.vfs.reljoin(tmproot, label2)
250 dir1b = None
250 dir1b = None
251 label1b = None
251 label1b = None
252 fns_and_mtime = []
252 fns_and_mtime = []
253
253
254 # Function to quote file/dir names in the argument string.
254 # Function to quote file/dir names in the argument string.
255 # When not operating in 3-way mode, an empty string is
255 # When not operating in 3-way mode, an empty string is
256 # returned for parent2
256 # returned for parent2
257 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
257 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
258 'plabel1': label1a, 'plabel2': label1b,
258 'plabel1': label1a, 'plabel2': label1b,
259 'clabel': label2, 'child': dir2,
259 'clabel': label2, 'child': dir2,
260 'root': repo.root}
260 'root': repo.root}
261 def quote(match):
261 def quote(match):
262 pre = match.group(2)
262 pre = match.group(2)
263 key = match.group(3)
263 key = match.group(3)
264 if not do3way and key == 'parent2':
264 if not do3way and key == 'parent2':
265 return pre
265 return pre
266 return pre + util.shellquote(replace[key])
266 return pre + util.shellquote(replace[key])
267
267
268 # Match parent2 first, so 'parent1?' will match both parent1 and parent
268 # Match parent2 first, so 'parent1?' will match both parent1 and parent
269 regex = (r'''(['"]?)([^\s'"$]*)'''
269 regex = (r'''(['"]?)([^\s'"$]*)'''
270 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
270 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
271 if not do3way and not re.search(regex, cmdline):
271 if not do3way and not re.search(regex, cmdline):
272 cmdline += ' $parent1 $child'
272 cmdline += ' $parent1 $child'
273 cmdline = re.sub(regex, quote, cmdline)
273 cmdline = re.sub(regex, quote, cmdline)
274
274
275 ui.debug('running %r in %s\n' % (cmdline, tmproot))
275 ui.debug('running %r in %s\n' % (cmdline, tmproot))
276 ui.system(cmdline, cwd=tmproot)
276 ui.system(cmdline, cwd=tmproot)
277
277
278 for copy_fn, working_fn, mtime in fns_and_mtime:
278 for copy_fn, working_fn, mtime in fns_and_mtime:
279 if os.lstat(copy_fn).st_mtime != mtime:
279 if os.lstat(copy_fn).st_mtime != mtime:
280 ui.debug('file changed while diffing. '
280 ui.debug('file changed while diffing. '
281 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
281 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
282 util.copyfile(copy_fn, working_fn)
282 util.copyfile(copy_fn, working_fn)
283
283
284 return 1
284 return 1
285 finally:
285 finally:
286 ui.note(_('cleaning up temp directory\n'))
286 ui.note(_('cleaning up temp directory\n'))
287 shutil.rmtree(tmproot)
287 shutil.rmtree(tmproot)
288
288
289 extdiffopts = [
289 extdiffopts = [
290 ('o', 'option', [],
290 ('o', 'option', [],
291 _('pass option to comparison program'), _('OPT')),
291 _('pass option to comparison program'), _('OPT')),
292 ('r', 'rev', [], _('revision'), _('REV')),
292 ('r', 'rev', [], _('revision'), _('REV')),
293 ('c', 'change', '', _('change made by revision'), _('REV')),
293 ('c', 'change', '', _('change made by revision'), _('REV')),
294 ('', 'patch', None, _('compare patches for two revisions'))
294 ('', 'patch', None, _('compare patches for two revisions'))
295 ] + commands.walkopts + commands.subrepoopts
295 ] + commands.walkopts + commands.subrepoopts
296
296
297 @command('extdiff',
297 @command('extdiff',
298 [('p', 'program', '', _('comparison program to run'), _('CMD')),
298 [('p', 'program', '', _('comparison program to run'), _('CMD')),
299 ] + extdiffopts,
299 ] + extdiffopts,
300 _('hg extdiff [OPT]... [FILE]...'),
300 _('hg extdiff [OPT]... [FILE]...'),
301 inferrepo=True)
301 inferrepo=True)
302 def extdiff(ui, repo, *pats, **opts):
302 def extdiff(ui, repo, *pats, **opts):
303 '''use external program to diff repository (or selected files)
303 '''use external program to diff repository (or selected files)
304
304
305 Show differences between revisions for the specified files, using
305 Show differences between revisions for the specified files, using
306 an external program. The default program used is diff, with
306 an external program. The default program used is diff, with
307 default options "-Npru".
307 default options "-Npru".
308
308
309 To select a different program, use the -p/--program option. The
309 To select a different program, use the -p/--program option. The
310 program will be passed the names of two directories to compare. To
310 program will be passed the names of two directories to compare. To
311 pass additional options to the program, use -o/--option. These
311 pass additional options to the program, use -o/--option. These
312 will be passed before the names of the directories to compare.
312 will be passed before the names of the directories to compare.
313
313
314 When two revision arguments are given, then changes are shown
314 When two revision arguments are given, then changes are shown
315 between those revisions. If only one revision is specified then
315 between those revisions. If only one revision is specified then
316 that revision is compared to the working directory, and, when no
316 that revision is compared to the working directory, and, when no
317 revisions are specified, the working directory files are compared
317 revisions are specified, the working directory files are compared
318 to its parent.'''
318 to its parent.'''
319 program = opts.get('program')
319 program = opts.get('program')
320 option = opts.get('option')
320 option = opts.get('option')
321 if not program:
321 if not program:
322 program = 'diff'
322 program = 'diff'
323 option = option or ['-Npru']
323 option = option or ['-Npru']
324 cmdline = ' '.join(map(util.shellquote, [program] + option))
324 cmdline = ' '.join(map(util.shellquote, [program] + option))
325 return dodiff(ui, repo, cmdline, pats, opts)
325 return dodiff(ui, repo, cmdline, pats, opts)
326
326
327 class savedcmd(object):
327 class savedcmd(object):
328 """use external program to diff repository (or selected files)
328 """use external program to diff repository (or selected files)
329
329
330 Show differences between revisions for the specified files, using
330 Show differences between revisions for the specified files, using
331 the following program::
331 the following program::
332
332
333 %(path)s
333 %(path)s
334
334
335 When two revision arguments are given, then changes are shown
335 When two revision arguments are given, then changes are shown
336 between those revisions. If only one revision is specified then
336 between those revisions. If only one revision is specified then
337 that revision is compared to the working directory, and, when no
337 that revision is compared to the working directory, and, when no
338 revisions are specified, the working directory files are compared
338 revisions are specified, the working directory files are compared
339 to its parent.
339 to its parent.
340 """
340 """
341
341
342 def __init__(self, path, cmdline):
342 def __init__(self, path, cmdline):
343 # We can't pass non-ASCII through docstrings (and path is
343 # We can't pass non-ASCII through docstrings (and path is
344 # in an unknown encoding anyway)
344 # in an unknown encoding anyway)
345 docpath = path.encode("string-escape")
345 docpath = path.encode("string-escape")
346 self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
346 self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
347 self._cmdline = cmdline
347 self._cmdline = cmdline
348
348
349 def __call__(self, ui, repo, *pats, **opts):
349 def __call__(self, ui, repo, *pats, **opts):
350 options = ' '.join(map(util.shellquote, opts['option']))
350 options = ' '.join(map(util.shellquote, opts['option']))
351 if options:
351 if options:
352 options = ' ' + options
352 options = ' ' + options
353 return dodiff(ui, repo, self._cmdline + options, pats, opts)
353 return dodiff(ui, repo, self._cmdline + options, pats, opts)
354
354
355 def uisetup(ui):
355 def uisetup(ui):
356 for cmd, path in ui.configitems('extdiff'):
356 for cmd, path in ui.configitems('extdiff'):
357 path = util.expandpath(path)
357 path = util.expandpath(path)
358 if cmd.startswith('cmd.'):
358 if cmd.startswith('cmd.'):
359 cmd = cmd[4:]
359 cmd = cmd[4:]
360 if not path:
360 if not path:
361 path = util.findexe(cmd)
361 path = util.findexe(cmd)
362 if path is None:
362 if path is None:
363 path = filemerge.findexternaltool(ui, cmd) or cmd
363 path = filemerge.findexternaltool(ui, cmd) or cmd
364 diffopts = ui.config('extdiff', 'opts.' + cmd, '')
364 diffopts = ui.config('extdiff', 'opts.' + cmd, '')
365 cmdline = util.shellquote(path)
365 cmdline = util.shellquote(path)
366 if diffopts:
366 if diffopts:
367 cmdline += ' ' + diffopts
367 cmdline += ' ' + diffopts
368 elif cmd.startswith('opts.'):
368 elif cmd.startswith('opts.'):
369 continue
369 continue
370 else:
370 else:
371 if path:
371 if path:
372 # case "cmd = path opts"
372 # case "cmd = path opts"
373 cmdline = path
373 cmdline = path
374 diffopts = len(shlex.split(cmdline)) > 1
374 diffopts = len(shlex.split(cmdline)) > 1
375 else:
375 else:
376 # case "cmd ="
376 # case "cmd ="
377 path = util.findexe(cmd)
377 path = util.findexe(cmd)
378 if path is None:
378 if path is None:
379 path = filemerge.findexternaltool(ui, cmd) or cmd
379 path = filemerge.findexternaltool(ui, cmd) or cmd
380 cmdline = util.shellquote(path)
380 cmdline = util.shellquote(path)
381 diffopts = False
381 diffopts = False
382 # look for diff arguments in [diff-tools] then [merge-tools]
382 # look for diff arguments in [diff-tools] then [merge-tools]
383 if not diffopts:
383 if not diffopts:
384 args = ui.config('diff-tools', cmd+'.diffargs') or \
384 args = ui.config('diff-tools', cmd+'.diffargs') or \
385 ui.config('merge-tools', cmd+'.diffargs')
385 ui.config('merge-tools', cmd+'.diffargs')
386 if args:
386 if args:
387 cmdline += ' ' + args
387 cmdline += ' ' + args
388 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
388 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
389 inferrepo=True)(savedcmd(path, cmdline))
389 inferrepo=True)(savedcmd(path, cmdline))
390
390
391 # tell hggettext to extract docstrings from these functions:
391 # tell hggettext to extract docstrings from these functions:
392 i18nfunctions = [savedcmd]
392 i18nfunctions = [savedcmd]
@@ -1,165 +1,165 b''
1 # fetch.py - pull and merge remote changes
1 # fetch.py - pull and merge remote changes
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''pull, update and merge in one command (DEPRECATED)'''
8 '''pull, update and merge in one command (DEPRECATED)'''
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import (
13 from mercurial.node import (
14 short,
14 short,
15 )
15 )
16 from mercurial import (
16 from mercurial import (
17 cmdutil,
17 cmdutil,
18 commands,
18 commands,
19 error,
19 error,
20 exchange,
20 exchange,
21 hg,
21 hg,
22 lock,
22 lock,
23 util,
23 util,
24 )
24 )
25
25
26 release = lock.release
26 release = lock.release
27 cmdtable = {}
27 cmdtable = {}
28 command = cmdutil.command(cmdtable)
28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 # be specifying the version(s) of Mercurial they are tested with, or
31 # be specifying the version(s) of Mercurial they are tested with, or
32 # leave the attribute unspecified.
32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34
34
35 @command('fetch',
35 @command('fetch',
36 [('r', 'rev', [],
36 [('r', 'rev', [],
37 _('a specific revision you would like to pull'), _('REV')),
37 _('a specific revision you would like to pull'), _('REV')),
38 ('e', 'edit', None, _('invoke editor on commit messages')),
38 ('e', 'edit', None, _('invoke editor on commit messages')),
39 ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
39 ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
40 ('', 'switch-parent', None, _('switch parents when merging')),
40 ('', 'switch-parent', None, _('switch parents when merging')),
41 ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
41 ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
42 _('hg fetch [SOURCE]'))
42 _('hg fetch [SOURCE]'))
43 def fetch(ui, repo, source='default', **opts):
43 def fetch(ui, repo, source='default', **opts):
44 '''pull changes from a remote repository, merge new changes if needed.
44 '''pull changes from a remote repository, merge new changes if needed.
45
45
46 This finds all changes from the repository at the specified path
46 This finds all changes from the repository at the specified path
47 or URL and adds them to the local repository.
47 or URL and adds them to the local repository.
48
48
49 If the pulled changes add a new branch head, the head is
49 If the pulled changes add a new branch head, the head is
50 automatically merged, and the result of the merge is committed.
50 automatically merged, and the result of the merge is committed.
51 Otherwise, the working directory is updated to include the new
51 Otherwise, the working directory is updated to include the new
52 changes.
52 changes.
53
53
54 When a merge is needed, the working directory is first updated to
54 When a merge is needed, the working directory is first updated to
55 the newly pulled changes. Local changes are then merged into the
55 the newly pulled changes. Local changes are then merged into the
56 pulled changes. To switch the merge order, use --switch-parent.
56 pulled changes. To switch the merge order, use --switch-parent.
57
57
58 See :hg:`help dates` for a list of formats valid for -d/--date.
58 See :hg:`help dates` for a list of formats valid for -d/--date.
59
59
60 Returns 0 on success.
60 Returns 0 on success.
61 '''
61 '''
62
62
63 date = opts.get('date')
63 date = opts.get('date')
64 if date:
64 if date:
65 opts['date'] = util.parsedate(date)
65 opts['date'] = util.parsedate(date)
66
66
67 parent, _p2 = repo.dirstate.parents()
67 parent, _p2 = repo.dirstate.parents()
68 branch = repo.dirstate.branch()
68 branch = repo.dirstate.branch()
69 try:
69 try:
70 branchnode = repo.branchtip(branch)
70 branchnode = repo.branchtip(branch)
71 except error.RepoLookupError:
71 except error.RepoLookupError:
72 branchnode = None
72 branchnode = None
73 if parent != branchnode:
73 if parent != branchnode:
74 raise error.Abort(_('working directory not at branch tip'),
74 raise error.Abort(_('working directory not at branch tip'),
75 hint=_("use 'hg update' to check out branch tip"))
75 hint=_("use 'hg update' to check out branch tip"))
76
76
77 wlock = lock = None
77 wlock = lock = None
78 try:
78 try:
79 wlock = repo.wlock()
79 wlock = repo.wlock()
80 lock = repo.lock()
80 lock = repo.lock()
81
81
82 cmdutil.bailifchanged(repo)
82 cmdutil.bailifchanged(repo)
83
83
84 bheads = repo.branchheads(branch)
84 bheads = repo.branchheads(branch)
85 bheads = [head for head in bheads if len(repo[head].children()) == 0]
85 bheads = [head for head in bheads if len(repo[head].children()) == 0]
86 if len(bheads) > 1:
86 if len(bheads) > 1:
87 raise error.Abort(_('multiple heads in this branch '
87 raise error.Abort(_('multiple heads in this branch '
88 '(use "hg heads ." and "hg merge" to merge)'))
88 '(use "hg heads ." and "hg merge" to merge)'))
89
89
90 other = hg.peer(repo, opts, ui.expandpath(source))
90 other = hg.peer(repo, opts, ui.expandpath(source))
91 ui.status(_('pulling from %s\n') %
91 ui.status(_('pulling from %s\n') %
92 util.hidepassword(ui.expandpath(source)))
92 util.hidepassword(ui.expandpath(source)))
93 revs = None
93 revs = None
94 if opts['rev']:
94 if opts['rev']:
95 try:
95 try:
96 revs = [other.lookup(rev) for rev in opts['rev']]
96 revs = [other.lookup(rev) for rev in opts['rev']]
97 except error.CapabilityError:
97 except error.CapabilityError:
98 err = _("other repository doesn't support revision lookup, "
98 err = _("other repository doesn't support revision lookup, "
99 "so a rev cannot be specified.")
99 "so a rev cannot be specified.")
100 raise error.Abort(err)
100 raise error.Abort(err)
101
101
102 # Are there any changes at all?
102 # Are there any changes at all?
103 modheads = exchange.pull(repo, other, heads=revs).cgresult
103 modheads = exchange.pull(repo, other, heads=revs).cgresult
104 if modheads == 0:
104 if modheads == 0:
105 return 0
105 return 0
106
106
107 # Is this a simple fast-forward along the current branch?
107 # Is this a simple fast-forward along the current branch?
108 newheads = repo.branchheads(branch)
108 newheads = repo.branchheads(branch)
109 newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
109 newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
110 if len(newheads) == 1 and len(newchildren):
110 if len(newheads) == 1 and len(newchildren):
111 if newchildren[0] != parent:
111 if newchildren[0] != parent:
112 return hg.update(repo, newchildren[0])
112 return hg.update(repo, newchildren[0])
113 else:
113 else:
114 return 0
114 return 0
115
115
116 # Are there more than one additional branch heads?
116 # Are there more than one additional branch heads?
117 newchildren = [n for n in newchildren if n != parent]
117 newchildren = [n for n in newchildren if n != parent]
118 newparent = parent
118 newparent = parent
119 if newchildren:
119 if newchildren:
120 newparent = newchildren[0]
120 newparent = newchildren[0]
121 hg.clean(repo, newparent)
121 hg.clean(repo, newparent)
122 newheads = [n for n in newheads if n != newparent]
122 newheads = [n for n in newheads if n != newparent]
123 if len(newheads) > 1:
123 if len(newheads) > 1:
124 ui.status(_('not merging with %d other new branch heads '
124 ui.status(_('not merging with %d other new branch heads '
125 '(use "hg heads ." and "hg merge" to merge them)\n') %
125 '(use "hg heads ." and "hg merge" to merge them)\n') %
126 (len(newheads) - 1))
126 (len(newheads) - 1))
127 return 1
127 return 1
128
128
129 if not newheads:
129 if not newheads:
130 return 0
130 return 0
131
131
132 # Otherwise, let's merge.
132 # Otherwise, let's merge.
133 err = False
133 err = False
134 if newheads:
134 if newheads:
135 # By default, we consider the repository we're pulling
135 # By default, we consider the repository we're pulling
136 # *from* as authoritative, so we merge our changes into
136 # *from* as authoritative, so we merge our changes into
137 # theirs.
137 # theirs.
138 if opts['switch_parent']:
138 if opts['switch_parent']:
139 firstparent, secondparent = newparent, newheads[0]
139 firstparent, secondparent = newparent, newheads[0]
140 else:
140 else:
141 firstparent, secondparent = newheads[0], newparent
141 firstparent, secondparent = newheads[0], newparent
142 ui.status(_('updating to %d:%s\n') %
142 ui.status(_('updating to %d:%s\n') %
143 (repo.changelog.rev(firstparent),
143 (repo.changelog.rev(firstparent),
144 short(firstparent)))
144 short(firstparent)))
145 hg.clean(repo, firstparent)
145 hg.clean(repo, firstparent)
146 ui.status(_('merging with %d:%s\n') %
146 ui.status(_('merging with %d:%s\n') %
147 (repo.changelog.rev(secondparent), short(secondparent)))
147 (repo.changelog.rev(secondparent), short(secondparent)))
148 err = hg.merge(repo, secondparent, remind=False)
148 err = hg.merge(repo, secondparent, remind=False)
149
149
150 if not err:
150 if not err:
151 # we don't translate commit messages
151 # we don't translate commit messages
152 message = (cmdutil.logmessage(ui, opts) or
152 message = (cmdutil.logmessage(ui, opts) or
153 ('Automated merge with %s' %
153 ('Automated merge with %s' %
154 util.removeauth(other.url())))
154 util.removeauth(other.url())))
155 editopt = opts.get('edit') or opts.get('force_editor')
155 editopt = opts.get('edit') or opts.get('force_editor')
156 editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
156 editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
157 n = repo.commit(message, opts['user'], opts['date'], editor=editor)
157 n = repo.commit(message, opts['user'], opts['date'], editor=editor)
158 ui.status(_('new changeset %d:%s merges remote changes '
158 ui.status(_('new changeset %d:%s merges remote changes '
159 'with local\n') % (repo.changelog.rev(n),
159 'with local\n') % (repo.changelog.rev(n),
160 short(n)))
160 short(n)))
161
161
162 return err
162 return err
163
163
164 finally:
164 finally:
165 release(lock, wlock)
165 release(lock, wlock)
@@ -1,695 +1,695 b''
1 # __init__.py - fsmonitor initialization and overrides
1 # __init__.py - fsmonitor initialization and overrides
2 #
2 #
3 # Copyright 2013-2016 Facebook, Inc.
3 # Copyright 2013-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9
9
10 Integrates the file-watching program Watchman with Mercurial to produce faster
10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 status results.
11 status results.
12
12
13 On a particular Linux system, for a real-world repository with over 400,000
13 On a particular Linux system, for a real-world repository with over 400,000
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 system, with fsmonitor it takes about 0.3 seconds.
15 system, with fsmonitor it takes about 0.3 seconds.
16
16
17 fsmonitor requires no configuration -- it will tell Watchman about your
17 fsmonitor requires no configuration -- it will tell Watchman about your
18 repository as necessary. You'll need to install Watchman from
18 repository as necessary. You'll need to install Watchman from
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20
20
21 The following configuration options exist:
21 The following configuration options exist:
22
22
23 ::
23 ::
24
24
25 [fsmonitor]
25 [fsmonitor]
26 mode = {off, on, paranoid}
26 mode = {off, on, paranoid}
27
27
28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
31 and ensure that the results are consistent.
31 and ensure that the results are consistent.
32
32
33 ::
33 ::
34
34
35 [fsmonitor]
35 [fsmonitor]
36 timeout = (float)
36 timeout = (float)
37
37
38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
39 to return results. Defaults to `2.0`.
39 to return results. Defaults to `2.0`.
40
40
41 ::
41 ::
42
42
43 [fsmonitor]
43 [fsmonitor]
44 blacklistusers = (list of userids)
44 blacklistusers = (list of userids)
45
45
46 A list of usernames for which fsmonitor will disable itself altogether.
46 A list of usernames for which fsmonitor will disable itself altogether.
47
47
48 ::
48 ::
49
49
50 [fsmonitor]
50 [fsmonitor]
51 walk_on_invalidate = (boolean)
51 walk_on_invalidate = (boolean)
52
52
53 Whether or not to walk the whole repo ourselves when our cached state has been
53 Whether or not to walk the whole repo ourselves when our cached state has been
54 invalidated, for example when Watchman has been restarted or .hgignore rules
54 invalidated, for example when Watchman has been restarted or .hgignore rules
55 have been changed. Walking the repo in that case can result in competing for
55 have been changed. Walking the repo in that case can result in competing for
56 I/O with Watchman. For large repos it is recommended to set this value to
56 I/O with Watchman. For large repos it is recommended to set this value to
57 false. You may wish to set this to true if you have a very fast filesystem
57 false. You may wish to set this to true if you have a very fast filesystem
58 that can outpace the IPC overhead of getting the result data for the full repo
58 that can outpace the IPC overhead of getting the result data for the full repo
59 from Watchman. Defaults to false.
59 from Watchman. Defaults to false.
60
60
61 fsmonitor is incompatible with the largefiles and eol extensions, and
61 fsmonitor is incompatible with the largefiles and eol extensions, and
62 will disable itself if any of those are active.
62 will disable itself if any of those are active.
63
63
64 '''
64 '''
65
65
66 # Platforms Supported
66 # Platforms Supported
67 # ===================
67 # ===================
68 #
68 #
69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
70 # even under severe loads.
70 # even under severe loads.
71 #
71 #
72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
74 # user testing under normal loads.
74 # user testing under normal loads.
75 #
75 #
76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
77 # very little testing has been done.
77 # very little testing has been done.
78 #
78 #
79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
80 #
80 #
81 # Known Issues
81 # Known Issues
82 # ============
82 # ============
83 #
83 #
84 # * fsmonitor will disable itself if any of the following extensions are
84 # * fsmonitor will disable itself if any of the following extensions are
85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
86 # * fsmonitor will produce incorrect results if nested repos that are not
86 # * fsmonitor will produce incorrect results if nested repos that are not
87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
88 #
88 #
89 # The issues related to nested repos and subrepos are probably not fundamental
89 # The issues related to nested repos and subrepos are probably not fundamental
90 # ones. Patches to fix them are welcome.
90 # ones. Patches to fix them are welcome.
91
91
92 from __future__ import absolute_import
92 from __future__ import absolute_import
93
93
94 import hashlib
94 import hashlib
95 import os
95 import os
96 import stat
96 import stat
97 import sys
97 import sys
98
98
99 from mercurial.i18n import _
99 from mercurial.i18n import _
100 from mercurial import (
100 from mercurial import (
101 context,
101 context,
102 extensions,
102 extensions,
103 localrepo,
103 localrepo,
104 merge,
104 merge,
105 pathutil,
105 pathutil,
106 scmutil,
106 scmutil,
107 util,
107 util,
108 )
108 )
109 from mercurial import match as matchmod
109 from mercurial import match as matchmod
110
110
111 from . import (
111 from . import (
112 state,
112 state,
113 watchmanclient,
113 watchmanclient,
114 )
114 )
115
115
116 # Note for extension authors: ONLY specify testedwith = 'internal' for
116 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
117 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
117 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
118 # be specifying the version(s) of Mercurial they are tested with, or
118 # be specifying the version(s) of Mercurial they are tested with, or
119 # leave the attribute unspecified.
119 # leave the attribute unspecified.
120 testedwith = 'internal'
120 testedwith = 'ships-with-hg-core'
121
121
122 # This extension is incompatible with the following blacklisted extensions
122 # This extension is incompatible with the following blacklisted extensions
123 # and will disable itself when encountering one of these:
123 # and will disable itself when encountering one of these:
124 _blacklist = ['largefiles', 'eol']
124 _blacklist = ['largefiles', 'eol']
125
125
126 def _handleunavailable(ui, state, ex):
126 def _handleunavailable(ui, state, ex):
127 """Exception handler for Watchman interaction exceptions"""
127 """Exception handler for Watchman interaction exceptions"""
128 if isinstance(ex, watchmanclient.Unavailable):
128 if isinstance(ex, watchmanclient.Unavailable):
129 if ex.warn:
129 if ex.warn:
130 ui.warn(str(ex) + '\n')
130 ui.warn(str(ex) + '\n')
131 if ex.invalidate:
131 if ex.invalidate:
132 state.invalidate()
132 state.invalidate()
133 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
133 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
134 else:
134 else:
135 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
135 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
136
136
137 def _hashignore(ignore):
137 def _hashignore(ignore):
138 """Calculate hash for ignore patterns and filenames
138 """Calculate hash for ignore patterns and filenames
139
139
140 If this information changes between Mercurial invocations, we can't
140 If this information changes between Mercurial invocations, we can't
141 rely on Watchman information anymore and have to re-scan the working
141 rely on Watchman information anymore and have to re-scan the working
142 copy.
142 copy.
143
143
144 """
144 """
145 sha1 = hashlib.sha1()
145 sha1 = hashlib.sha1()
146 if util.safehasattr(ignore, 'includepat'):
146 if util.safehasattr(ignore, 'includepat'):
147 sha1.update(ignore.includepat)
147 sha1.update(ignore.includepat)
148 sha1.update('\0\0')
148 sha1.update('\0\0')
149 if util.safehasattr(ignore, 'excludepat'):
149 if util.safehasattr(ignore, 'excludepat'):
150 sha1.update(ignore.excludepat)
150 sha1.update(ignore.excludepat)
151 sha1.update('\0\0')
151 sha1.update('\0\0')
152 if util.safehasattr(ignore, 'patternspat'):
152 if util.safehasattr(ignore, 'patternspat'):
153 sha1.update(ignore.patternspat)
153 sha1.update(ignore.patternspat)
154 sha1.update('\0\0')
154 sha1.update('\0\0')
155 if util.safehasattr(ignore, '_files'):
155 if util.safehasattr(ignore, '_files'):
156 for f in ignore._files:
156 for f in ignore._files:
157 sha1.update(f)
157 sha1.update(f)
158 sha1.update('\0')
158 sha1.update('\0')
159 return sha1.hexdigest()
159 return sha1.hexdigest()
160
160
161 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
161 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
162 '''Replacement for dirstate.walk, hooking into Watchman.
162 '''Replacement for dirstate.walk, hooking into Watchman.
163
163
164 Whenever full is False, ignored is False, and the Watchman client is
164 Whenever full is False, ignored is False, and the Watchman client is
165 available, use Watchman combined with saved state to possibly return only a
165 available, use Watchman combined with saved state to possibly return only a
166 subset of files.'''
166 subset of files.'''
167 def bail():
167 def bail():
168 return orig(match, subrepos, unknown, ignored, full=True)
168 return orig(match, subrepos, unknown, ignored, full=True)
169
169
170 if full or ignored or not self._watchmanclient.available():
170 if full or ignored or not self._watchmanclient.available():
171 return bail()
171 return bail()
172 state = self._fsmonitorstate
172 state = self._fsmonitorstate
173 clock, ignorehash, notefiles = state.get()
173 clock, ignorehash, notefiles = state.get()
174 if not clock:
174 if not clock:
175 if state.walk_on_invalidate:
175 if state.walk_on_invalidate:
176 return bail()
176 return bail()
177 # Initial NULL clock value, see
177 # Initial NULL clock value, see
178 # https://facebook.github.io/watchman/docs/clockspec.html
178 # https://facebook.github.io/watchman/docs/clockspec.html
179 clock = 'c:0:0'
179 clock = 'c:0:0'
180 notefiles = []
180 notefiles = []
181
181
182 def fwarn(f, msg):
182 def fwarn(f, msg):
183 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
183 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
184 return False
184 return False
185
185
186 def badtype(mode):
186 def badtype(mode):
187 kind = _('unknown')
187 kind = _('unknown')
188 if stat.S_ISCHR(mode):
188 if stat.S_ISCHR(mode):
189 kind = _('character device')
189 kind = _('character device')
190 elif stat.S_ISBLK(mode):
190 elif stat.S_ISBLK(mode):
191 kind = _('block device')
191 kind = _('block device')
192 elif stat.S_ISFIFO(mode):
192 elif stat.S_ISFIFO(mode):
193 kind = _('fifo')
193 kind = _('fifo')
194 elif stat.S_ISSOCK(mode):
194 elif stat.S_ISSOCK(mode):
195 kind = _('socket')
195 kind = _('socket')
196 elif stat.S_ISDIR(mode):
196 elif stat.S_ISDIR(mode):
197 kind = _('directory')
197 kind = _('directory')
198 return _('unsupported file type (type is %s)') % kind
198 return _('unsupported file type (type is %s)') % kind
199
199
200 ignore = self._ignore
200 ignore = self._ignore
201 dirignore = self._dirignore
201 dirignore = self._dirignore
202 if unknown:
202 if unknown:
203 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
203 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
204 # ignore list changed -- can't rely on Watchman state any more
204 # ignore list changed -- can't rely on Watchman state any more
205 if state.walk_on_invalidate:
205 if state.walk_on_invalidate:
206 return bail()
206 return bail()
207 notefiles = []
207 notefiles = []
208 clock = 'c:0:0'
208 clock = 'c:0:0'
209 else:
209 else:
210 # always ignore
210 # always ignore
211 ignore = util.always
211 ignore = util.always
212 dirignore = util.always
212 dirignore = util.always
213
213
214 matchfn = match.matchfn
214 matchfn = match.matchfn
215 matchalways = match.always()
215 matchalways = match.always()
216 dmap = self._map
216 dmap = self._map
217 nonnormalset = getattr(self, '_nonnormalset', None)
217 nonnormalset = getattr(self, '_nonnormalset', None)
218
218
219 copymap = self._copymap
219 copymap = self._copymap
220 getkind = stat.S_IFMT
220 getkind = stat.S_IFMT
221 dirkind = stat.S_IFDIR
221 dirkind = stat.S_IFDIR
222 regkind = stat.S_IFREG
222 regkind = stat.S_IFREG
223 lnkkind = stat.S_IFLNK
223 lnkkind = stat.S_IFLNK
224 join = self._join
224 join = self._join
225 normcase = util.normcase
225 normcase = util.normcase
226 fresh_instance = False
226 fresh_instance = False
227
227
228 exact = skipstep3 = False
228 exact = skipstep3 = False
229 if matchfn == match.exact: # match.exact
229 if matchfn == match.exact: # match.exact
230 exact = True
230 exact = True
231 dirignore = util.always # skip step 2
231 dirignore = util.always # skip step 2
232 elif match.files() and not match.anypats(): # match.match, no patterns
232 elif match.files() and not match.anypats(): # match.match, no patterns
233 skipstep3 = True
233 skipstep3 = True
234
234
235 if not exact and self._checkcase:
235 if not exact and self._checkcase:
236 # note that even though we could receive directory entries, we're only
236 # note that even though we could receive directory entries, we're only
237 # interested in checking if a file with the same name exists. So only
237 # interested in checking if a file with the same name exists. So only
238 # normalize files if possible.
238 # normalize files if possible.
239 normalize = self._normalizefile
239 normalize = self._normalizefile
240 skipstep3 = False
240 skipstep3 = False
241 else:
241 else:
242 normalize = None
242 normalize = None
243
243
244 # step 1: find all explicit files
244 # step 1: find all explicit files
245 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
245 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
246
246
247 skipstep3 = skipstep3 and not (work or dirsnotfound)
247 skipstep3 = skipstep3 and not (work or dirsnotfound)
248 work = [d for d in work if not dirignore(d[0])]
248 work = [d for d in work if not dirignore(d[0])]
249
249
250 if not work and (exact or skipstep3):
250 if not work and (exact or skipstep3):
251 for s in subrepos:
251 for s in subrepos:
252 del results[s]
252 del results[s]
253 del results['.hg']
253 del results['.hg']
254 return results
254 return results
255
255
256 # step 2: query Watchman
256 # step 2: query Watchman
257 try:
257 try:
258 # Use the user-configured timeout for the query.
258 # Use the user-configured timeout for the query.
259 # Add a little slack over the top of the user query to allow for
259 # Add a little slack over the top of the user query to allow for
260 # overheads while transferring the data
260 # overheads while transferring the data
261 self._watchmanclient.settimeout(state.timeout + 0.1)
261 self._watchmanclient.settimeout(state.timeout + 0.1)
262 result = self._watchmanclient.command('query', {
262 result = self._watchmanclient.command('query', {
263 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
263 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
264 'since': clock,
264 'since': clock,
265 'expression': [
265 'expression': [
266 'not', [
266 'not', [
267 'anyof', ['dirname', '.hg'],
267 'anyof', ['dirname', '.hg'],
268 ['name', '.hg', 'wholename']
268 ['name', '.hg', 'wholename']
269 ]
269 ]
270 ],
270 ],
271 'sync_timeout': int(state.timeout * 1000),
271 'sync_timeout': int(state.timeout * 1000),
272 'empty_on_fresh_instance': state.walk_on_invalidate,
272 'empty_on_fresh_instance': state.walk_on_invalidate,
273 })
273 })
274 except Exception as ex:
274 except Exception as ex:
275 _handleunavailable(self._ui, state, ex)
275 _handleunavailable(self._ui, state, ex)
276 self._watchmanclient.clearconnection()
276 self._watchmanclient.clearconnection()
277 return bail()
277 return bail()
278 else:
278 else:
279 # We need to propagate the last observed clock up so that we
279 # We need to propagate the last observed clock up so that we
280 # can use it for our next query
280 # can use it for our next query
281 state.setlastclock(result['clock'])
281 state.setlastclock(result['clock'])
282 if result['is_fresh_instance']:
282 if result['is_fresh_instance']:
283 if state.walk_on_invalidate:
283 if state.walk_on_invalidate:
284 state.invalidate()
284 state.invalidate()
285 return bail()
285 return bail()
286 fresh_instance = True
286 fresh_instance = True
287 # Ignore any prior noteable files from the state info
287 # Ignore any prior noteable files from the state info
288 notefiles = []
288 notefiles = []
289
289
290 # for file paths which require normalization and we encounter a case
290 # for file paths which require normalization and we encounter a case
291 # collision, we store our own foldmap
291 # collision, we store our own foldmap
292 if normalize:
292 if normalize:
293 foldmap = dict((normcase(k), k) for k in results)
293 foldmap = dict((normcase(k), k) for k in results)
294
294
295 switch_slashes = os.sep == '\\'
295 switch_slashes = os.sep == '\\'
296 # The order of the results is, strictly speaking, undefined.
296 # The order of the results is, strictly speaking, undefined.
297 # For case changes on a case insensitive filesystem we may receive
297 # For case changes on a case insensitive filesystem we may receive
298 # two entries, one with exists=True and another with exists=False.
298 # two entries, one with exists=True and another with exists=False.
299 # The exists=True entries in the same response should be interpreted
299 # The exists=True entries in the same response should be interpreted
300 # as being happens-after the exists=False entries due to the way that
300 # as being happens-after the exists=False entries due to the way that
301 # Watchman tracks files. We use this property to reconcile deletes
301 # Watchman tracks files. We use this property to reconcile deletes
302 # for name case changes.
302 # for name case changes.
303 for entry in result['files']:
303 for entry in result['files']:
304 fname = entry['name']
304 fname = entry['name']
305 if switch_slashes:
305 if switch_slashes:
306 fname = fname.replace('\\', '/')
306 fname = fname.replace('\\', '/')
307 if normalize:
307 if normalize:
308 normed = normcase(fname)
308 normed = normcase(fname)
309 fname = normalize(fname, True, True)
309 fname = normalize(fname, True, True)
310 foldmap[normed] = fname
310 foldmap[normed] = fname
311 fmode = entry['mode']
311 fmode = entry['mode']
312 fexists = entry['exists']
312 fexists = entry['exists']
313 kind = getkind(fmode)
313 kind = getkind(fmode)
314
314
315 if not fexists:
315 if not fexists:
316 # if marked as deleted and we don't already have a change
316 # if marked as deleted and we don't already have a change
317 # record, mark it as deleted. If we already have an entry
317 # record, mark it as deleted. If we already have an entry
318 # for fname then it was either part of walkexplicit or was
318 # for fname then it was either part of walkexplicit or was
319 # an earlier result that was a case change
319 # an earlier result that was a case change
320 if fname not in results and fname in dmap and (
320 if fname not in results and fname in dmap and (
321 matchalways or matchfn(fname)):
321 matchalways or matchfn(fname)):
322 results[fname] = None
322 results[fname] = None
323 elif kind == dirkind:
323 elif kind == dirkind:
324 if fname in dmap and (matchalways or matchfn(fname)):
324 if fname in dmap and (matchalways or matchfn(fname)):
325 results[fname] = None
325 results[fname] = None
326 elif kind == regkind or kind == lnkkind:
326 elif kind == regkind or kind == lnkkind:
327 if fname in dmap:
327 if fname in dmap:
328 if matchalways or matchfn(fname):
328 if matchalways or matchfn(fname):
329 results[fname] = entry
329 results[fname] = entry
330 elif (matchalways or matchfn(fname)) and not ignore(fname):
330 elif (matchalways or matchfn(fname)) and not ignore(fname):
331 results[fname] = entry
331 results[fname] = entry
332 elif fname in dmap and (matchalways or matchfn(fname)):
332 elif fname in dmap and (matchalways or matchfn(fname)):
333 results[fname] = None
333 results[fname] = None
334
334
335 # step 3: query notable files we don't already know about
335 # step 3: query notable files we don't already know about
336 # XXX try not to iterate over the entire dmap
336 # XXX try not to iterate over the entire dmap
337 if normalize:
337 if normalize:
338 # any notable files that have changed case will already be handled
338 # any notable files that have changed case will already be handled
339 # above, so just check membership in the foldmap
339 # above, so just check membership in the foldmap
340 notefiles = set((normalize(f, True, True) for f in notefiles
340 notefiles = set((normalize(f, True, True) for f in notefiles
341 if normcase(f) not in foldmap))
341 if normcase(f) not in foldmap))
342 visit = set((f for f in notefiles if (f not in results and matchfn(f)
342 visit = set((f for f in notefiles if (f not in results and matchfn(f)
343 and (f in dmap or not ignore(f)))))
343 and (f in dmap or not ignore(f)))))
344
344
345 if nonnormalset is not None and not fresh_instance:
345 if nonnormalset is not None and not fresh_instance:
346 if matchalways:
346 if matchalways:
347 visit.update(f for f in nonnormalset if f not in results)
347 visit.update(f for f in nonnormalset if f not in results)
348 visit.update(f for f in copymap if f not in results)
348 visit.update(f for f in copymap if f not in results)
349 else:
349 else:
350 visit.update(f for f in nonnormalset
350 visit.update(f for f in nonnormalset
351 if f not in results and matchfn(f))
351 if f not in results and matchfn(f))
352 visit.update(f for f in copymap
352 visit.update(f for f in copymap
353 if f not in results and matchfn(f))
353 if f not in results and matchfn(f))
354 else:
354 else:
355 if matchalways:
355 if matchalways:
356 visit.update(f for f, st in dmap.iteritems()
356 visit.update(f for f, st in dmap.iteritems()
357 if (f not in results and
357 if (f not in results and
358 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
358 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
359 visit.update(f for f in copymap if f not in results)
359 visit.update(f for f in copymap if f not in results)
360 else:
360 else:
361 visit.update(f for f, st in dmap.iteritems()
361 visit.update(f for f, st in dmap.iteritems()
362 if (f not in results and
362 if (f not in results and
363 (st[2] < 0 or st[0] != 'n' or fresh_instance)
363 (st[2] < 0 or st[0] != 'n' or fresh_instance)
364 and matchfn(f)))
364 and matchfn(f)))
365 visit.update(f for f in copymap
365 visit.update(f for f in copymap
366 if f not in results and matchfn(f))
366 if f not in results and matchfn(f))
367
367
368 audit = pathutil.pathauditor(self._root).check
368 audit = pathutil.pathauditor(self._root).check
369 auditpass = [f for f in visit if audit(f)]
369 auditpass = [f for f in visit if audit(f)]
370 auditpass.sort()
370 auditpass.sort()
371 auditfail = visit.difference(auditpass)
371 auditfail = visit.difference(auditpass)
372 for f in auditfail:
372 for f in auditfail:
373 results[f] = None
373 results[f] = None
374
374
375 nf = iter(auditpass).next
375 nf = iter(auditpass).next
376 for st in util.statfiles([join(f) for f in auditpass]):
376 for st in util.statfiles([join(f) for f in auditpass]):
377 f = nf()
377 f = nf()
378 if st or f in dmap:
378 if st or f in dmap:
379 results[f] = st
379 results[f] = st
380
380
381 for s in subrepos:
381 for s in subrepos:
382 del results[s]
382 del results[s]
383 del results['.hg']
383 del results['.hg']
384 return results
384 return results
385
385
386 def overridestatus(
386 def overridestatus(
387 orig, self, node1='.', node2=None, match=None, ignored=False,
387 orig, self, node1='.', node2=None, match=None, ignored=False,
388 clean=False, unknown=False, listsubrepos=False):
388 clean=False, unknown=False, listsubrepos=False):
389 listignored = ignored
389 listignored = ignored
390 listclean = clean
390 listclean = clean
391 listunknown = unknown
391 listunknown = unknown
392
392
393 def _cmpsets(l1, l2):
393 def _cmpsets(l1, l2):
394 try:
394 try:
395 if 'FSMONITOR_LOG_FILE' in os.environ:
395 if 'FSMONITOR_LOG_FILE' in os.environ:
396 fn = os.environ['FSMONITOR_LOG_FILE']
396 fn = os.environ['FSMONITOR_LOG_FILE']
397 f = open(fn, 'wb')
397 f = open(fn, 'wb')
398 else:
398 else:
399 fn = 'fsmonitorfail.log'
399 fn = 'fsmonitorfail.log'
400 f = self.opener(fn, 'wb')
400 f = self.opener(fn, 'wb')
401 except (IOError, OSError):
401 except (IOError, OSError):
402 self.ui.warn(_('warning: unable to write to %s\n') % fn)
402 self.ui.warn(_('warning: unable to write to %s\n') % fn)
403 return
403 return
404
404
405 try:
405 try:
406 for i, (s1, s2) in enumerate(zip(l1, l2)):
406 for i, (s1, s2) in enumerate(zip(l1, l2)):
407 if set(s1) != set(s2):
407 if set(s1) != set(s2):
408 f.write('sets at position %d are unequal\n' % i)
408 f.write('sets at position %d are unequal\n' % i)
409 f.write('watchman returned: %s\n' % s1)
409 f.write('watchman returned: %s\n' % s1)
410 f.write('stat returned: %s\n' % s2)
410 f.write('stat returned: %s\n' % s2)
411 finally:
411 finally:
412 f.close()
412 f.close()
413
413
414 if isinstance(node1, context.changectx):
414 if isinstance(node1, context.changectx):
415 ctx1 = node1
415 ctx1 = node1
416 else:
416 else:
417 ctx1 = self[node1]
417 ctx1 = self[node1]
418 if isinstance(node2, context.changectx):
418 if isinstance(node2, context.changectx):
419 ctx2 = node2
419 ctx2 = node2
420 else:
420 else:
421 ctx2 = self[node2]
421 ctx2 = self[node2]
422
422
423 working = ctx2.rev() is None
423 working = ctx2.rev() is None
424 parentworking = working and ctx1 == self['.']
424 parentworking = working and ctx1 == self['.']
425 match = match or matchmod.always(self.root, self.getcwd())
425 match = match or matchmod.always(self.root, self.getcwd())
426
426
427 # Maybe we can use this opportunity to update Watchman's state.
427 # Maybe we can use this opportunity to update Watchman's state.
428 # Mercurial uses workingcommitctx and/or memctx to represent the part of
428 # Mercurial uses workingcommitctx and/or memctx to represent the part of
429 # the workingctx that is to be committed. So don't update the state in
429 # the workingctx that is to be committed. So don't update the state in
430 # that case.
430 # that case.
431 # HG_PENDING is set in the environment when the dirstate is being updated
431 # HG_PENDING is set in the environment when the dirstate is being updated
432 # in the middle of a transaction; we must not update our state in that
432 # in the middle of a transaction; we must not update our state in that
433 # case, or we risk forgetting about changes in the working copy.
433 # case, or we risk forgetting about changes in the working copy.
434 updatestate = (parentworking and match.always() and
434 updatestate = (parentworking and match.always() and
435 not isinstance(ctx2, (context.workingcommitctx,
435 not isinstance(ctx2, (context.workingcommitctx,
436 context.memctx)) and
436 context.memctx)) and
437 'HG_PENDING' not in os.environ)
437 'HG_PENDING' not in os.environ)
438
438
439 try:
439 try:
440 if self._fsmonitorstate.walk_on_invalidate:
440 if self._fsmonitorstate.walk_on_invalidate:
441 # Use a short timeout to query the current clock. If that
441 # Use a short timeout to query the current clock. If that
442 # takes too long then we assume that the service will be slow
442 # takes too long then we assume that the service will be slow
443 # to answer our query.
443 # to answer our query.
444 # walk_on_invalidate indicates that we prefer to walk the
444 # walk_on_invalidate indicates that we prefer to walk the
445 # tree ourselves because we can ignore portions that Watchman
445 # tree ourselves because we can ignore portions that Watchman
446 # cannot and we tend to be faster in the warmer buffer cache
446 # cannot and we tend to be faster in the warmer buffer cache
447 # cases.
447 # cases.
448 self._watchmanclient.settimeout(0.1)
448 self._watchmanclient.settimeout(0.1)
449 else:
449 else:
450 # Give Watchman more time to potentially complete its walk
450 # Give Watchman more time to potentially complete its walk
451 # and return the initial clock. In this mode we assume that
451 # and return the initial clock. In this mode we assume that
452 # the filesystem will be slower than parsing a potentially
452 # the filesystem will be slower than parsing a potentially
453 # very large Watchman result set.
453 # very large Watchman result set.
454 self._watchmanclient.settimeout(
454 self._watchmanclient.settimeout(
455 self._fsmonitorstate.timeout + 0.1)
455 self._fsmonitorstate.timeout + 0.1)
456 startclock = self._watchmanclient.getcurrentclock()
456 startclock = self._watchmanclient.getcurrentclock()
457 except Exception as ex:
457 except Exception as ex:
458 self._watchmanclient.clearconnection()
458 self._watchmanclient.clearconnection()
459 _handleunavailable(self.ui, self._fsmonitorstate, ex)
459 _handleunavailable(self.ui, self._fsmonitorstate, ex)
460 # boo, Watchman failed. bail
460 # boo, Watchman failed. bail
461 return orig(node1, node2, match, listignored, listclean,
461 return orig(node1, node2, match, listignored, listclean,
462 listunknown, listsubrepos)
462 listunknown, listsubrepos)
463
463
464 if updatestate:
464 if updatestate:
465 # We need info about unknown files. This may make things slower the
465 # We need info about unknown files. This may make things slower the
466 # first time, but whatever.
466 # first time, but whatever.
467 stateunknown = True
467 stateunknown = True
468 else:
468 else:
469 stateunknown = listunknown
469 stateunknown = listunknown
470
470
471 r = orig(node1, node2, match, listignored, listclean, stateunknown,
471 r = orig(node1, node2, match, listignored, listclean, stateunknown,
472 listsubrepos)
472 listsubrepos)
473 modified, added, removed, deleted, unknown, ignored, clean = r
473 modified, added, removed, deleted, unknown, ignored, clean = r
474
474
475 if updatestate:
475 if updatestate:
476 notefiles = modified + added + removed + deleted + unknown
476 notefiles = modified + added + removed + deleted + unknown
477 self._fsmonitorstate.set(
477 self._fsmonitorstate.set(
478 self._fsmonitorstate.getlastclock() or startclock,
478 self._fsmonitorstate.getlastclock() or startclock,
479 _hashignore(self.dirstate._ignore),
479 _hashignore(self.dirstate._ignore),
480 notefiles)
480 notefiles)
481
481
482 if not listunknown:
482 if not listunknown:
483 unknown = []
483 unknown = []
484
484
485 # don't do paranoid checks if we're not going to query Watchman anyway
485 # don't do paranoid checks if we're not going to query Watchman anyway
486 full = listclean or match.traversedir is not None
486 full = listclean or match.traversedir is not None
487 if self._fsmonitorstate.mode == 'paranoid' and not full:
487 if self._fsmonitorstate.mode == 'paranoid' and not full:
488 # run status again and fall back to the old walk this time
488 # run status again and fall back to the old walk this time
489 self.dirstate._fsmonitordisable = True
489 self.dirstate._fsmonitordisable = True
490
490
491 # shut the UI up
491 # shut the UI up
492 quiet = self.ui.quiet
492 quiet = self.ui.quiet
493 self.ui.quiet = True
493 self.ui.quiet = True
494 fout, ferr = self.ui.fout, self.ui.ferr
494 fout, ferr = self.ui.fout, self.ui.ferr
495 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
495 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
496
496
497 try:
497 try:
498 rv2 = orig(
498 rv2 = orig(
499 node1, node2, match, listignored, listclean, listunknown,
499 node1, node2, match, listignored, listclean, listunknown,
500 listsubrepos)
500 listsubrepos)
501 finally:
501 finally:
502 self.dirstate._fsmonitordisable = False
502 self.dirstate._fsmonitordisable = False
503 self.ui.quiet = quiet
503 self.ui.quiet = quiet
504 self.ui.fout, self.ui.ferr = fout, ferr
504 self.ui.fout, self.ui.ferr = fout, ferr
505
505
506 # clean isn't tested since it's set to True above
506 # clean isn't tested since it's set to True above
507 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
507 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
508 rv2)
508 rv2)
509 modified, added, removed, deleted, unknown, ignored, clean = rv2
509 modified, added, removed, deleted, unknown, ignored, clean = rv2
510
510
511 return scmutil.status(
511 return scmutil.status(
512 modified, added, removed, deleted, unknown, ignored, clean)
512 modified, added, removed, deleted, unknown, ignored, clean)
513
513
514 def makedirstate(cls):
514 def makedirstate(cls):
515 class fsmonitordirstate(cls):
515 class fsmonitordirstate(cls):
516 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
516 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
517 # _fsmonitordisable is used in paranoid mode
517 # _fsmonitordisable is used in paranoid mode
518 self._fsmonitordisable = False
518 self._fsmonitordisable = False
519 self._fsmonitorstate = fsmonitorstate
519 self._fsmonitorstate = fsmonitorstate
520 self._watchmanclient = watchmanclient
520 self._watchmanclient = watchmanclient
521
521
522 def walk(self, *args, **kwargs):
522 def walk(self, *args, **kwargs):
523 orig = super(fsmonitordirstate, self).walk
523 orig = super(fsmonitordirstate, self).walk
524 if self._fsmonitordisable:
524 if self._fsmonitordisable:
525 return orig(*args, **kwargs)
525 return orig(*args, **kwargs)
526 return overridewalk(orig, self, *args, **kwargs)
526 return overridewalk(orig, self, *args, **kwargs)
527
527
528 def rebuild(self, *args, **kwargs):
528 def rebuild(self, *args, **kwargs):
529 self._fsmonitorstate.invalidate()
529 self._fsmonitorstate.invalidate()
530 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
530 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
531
531
532 def invalidate(self, *args, **kwargs):
532 def invalidate(self, *args, **kwargs):
533 self._fsmonitorstate.invalidate()
533 self._fsmonitorstate.invalidate()
534 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
534 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
535
535
536 return fsmonitordirstate
536 return fsmonitordirstate
537
537
538 def wrapdirstate(orig, self):
538 def wrapdirstate(orig, self):
539 ds = orig(self)
539 ds = orig(self)
540 # only override the dirstate when Watchman is available for the repo
540 # only override the dirstate when Watchman is available for the repo
541 if util.safehasattr(self, '_fsmonitorstate'):
541 if util.safehasattr(self, '_fsmonitorstate'):
542 ds.__class__ = makedirstate(ds.__class__)
542 ds.__class__ = makedirstate(ds.__class__)
543 ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
543 ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
544 return ds
544 return ds
545
545
546 def extsetup(ui):
546 def extsetup(ui):
547 wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
547 wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
548 if sys.platform == 'darwin':
548 if sys.platform == 'darwin':
549 # An assist for avoiding the dangling-symlink fsevents bug
549 # An assist for avoiding the dangling-symlink fsevents bug
550 extensions.wrapfunction(os, 'symlink', wrapsymlink)
550 extensions.wrapfunction(os, 'symlink', wrapsymlink)
551
551
552 extensions.wrapfunction(merge, 'update', wrapupdate)
552 extensions.wrapfunction(merge, 'update', wrapupdate)
553
553
554 def wrapsymlink(orig, source, link_name):
554 def wrapsymlink(orig, source, link_name):
555 ''' if we create a dangling symlink, also touch the parent dir
555 ''' if we create a dangling symlink, also touch the parent dir
556 to encourage fsevents notifications to work more correctly '''
556 to encourage fsevents notifications to work more correctly '''
557 try:
557 try:
558 return orig(source, link_name)
558 return orig(source, link_name)
559 finally:
559 finally:
560 try:
560 try:
561 os.utime(os.path.dirname(link_name), None)
561 os.utime(os.path.dirname(link_name), None)
562 except OSError:
562 except OSError:
563 pass
563 pass
564
564
565 class state_update(object):
565 class state_update(object):
566 ''' This context mananger is responsible for dispatching the state-enter
566 ''' This context mananger is responsible for dispatching the state-enter
567 and state-leave signals to the watchman service '''
567 and state-leave signals to the watchman service '''
568
568
569 def __init__(self, repo, node, distance, partial):
569 def __init__(self, repo, node, distance, partial):
570 self.repo = repo
570 self.repo = repo
571 self.node = node
571 self.node = node
572 self.distance = distance
572 self.distance = distance
573 self.partial = partial
573 self.partial = partial
574
574
575 def __enter__(self):
575 def __enter__(self):
576 self._state('state-enter')
576 self._state('state-enter')
577 return self
577 return self
578
578
579 def __exit__(self, type_, value, tb):
579 def __exit__(self, type_, value, tb):
580 status = 'ok' if type_ is None else 'failed'
580 status = 'ok' if type_ is None else 'failed'
581 self._state('state-leave', status=status)
581 self._state('state-leave', status=status)
582
582
583 def _state(self, cmd, status='ok'):
583 def _state(self, cmd, status='ok'):
584 if not util.safehasattr(self.repo, '_watchmanclient'):
584 if not util.safehasattr(self.repo, '_watchmanclient'):
585 return
585 return
586 try:
586 try:
587 commithash = self.repo[self.node].hex()
587 commithash = self.repo[self.node].hex()
588 self.repo._watchmanclient.command(cmd, {
588 self.repo._watchmanclient.command(cmd, {
589 'name': 'hg.update',
589 'name': 'hg.update',
590 'metadata': {
590 'metadata': {
591 # the target revision
591 # the target revision
592 'rev': commithash,
592 'rev': commithash,
593 # approximate number of commits between current and target
593 # approximate number of commits between current and target
594 'distance': self.distance,
594 'distance': self.distance,
595 # success/failure (only really meaningful for state-leave)
595 # success/failure (only really meaningful for state-leave)
596 'status': status,
596 'status': status,
597 # whether the working copy parent is changing
597 # whether the working copy parent is changing
598 'partial': self.partial,
598 'partial': self.partial,
599 }})
599 }})
600 except Exception as e:
600 except Exception as e:
601 # Swallow any errors; fire and forget
601 # Swallow any errors; fire and forget
602 self.repo.ui.log(
602 self.repo.ui.log(
603 'watchman', 'Exception %s while running %s\n', e, cmd)
603 'watchman', 'Exception %s while running %s\n', e, cmd)
604
604
605 # Bracket working copy updates with calls to the watchman state-enter
605 # Bracket working copy updates with calls to the watchman state-enter
606 # and state-leave commands. This allows clients to perform more intelligent
606 # and state-leave commands. This allows clients to perform more intelligent
607 # settling during bulk file change scenarios
607 # settling during bulk file change scenarios
608 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
608 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
609 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
609 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
610 mergeancestor=False, labels=None, matcher=None, **kwargs):
610 mergeancestor=False, labels=None, matcher=None, **kwargs):
611
611
612 distance = 0
612 distance = 0
613 partial = True
613 partial = True
614 if matcher is None or matcher.always():
614 if matcher is None or matcher.always():
615 partial = False
615 partial = False
616 wc = repo[None]
616 wc = repo[None]
617 parents = wc.parents()
617 parents = wc.parents()
618 if len(parents) == 2:
618 if len(parents) == 2:
619 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
619 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
620 ancrev = repo[anc].rev()
620 ancrev = repo[anc].rev()
621 distance = abs(repo[node].rev() - ancrev)
621 distance = abs(repo[node].rev() - ancrev)
622 elif len(parents) == 1:
622 elif len(parents) == 1:
623 distance = abs(repo[node].rev() - parents[0].rev())
623 distance = abs(repo[node].rev() - parents[0].rev())
624
624
625 with state_update(repo, node, distance, partial):
625 with state_update(repo, node, distance, partial):
626 return orig(
626 return orig(
627 repo, node, branchmerge, force, ancestor, mergeancestor,
627 repo, node, branchmerge, force, ancestor, mergeancestor,
628 labels, matcher, *kwargs)
628 labels, matcher, *kwargs)
629
629
630 def reposetup(ui, repo):
630 def reposetup(ui, repo):
631 # We don't work with largefiles or inotify
631 # We don't work with largefiles or inotify
632 exts = extensions.enabled()
632 exts = extensions.enabled()
633 for ext in _blacklist:
633 for ext in _blacklist:
634 if ext in exts:
634 if ext in exts:
635 ui.warn(_('The fsmonitor extension is incompatible with the %s '
635 ui.warn(_('The fsmonitor extension is incompatible with the %s '
636 'extension and has been disabled.\n') % ext)
636 'extension and has been disabled.\n') % ext)
637 return
637 return
638
638
639 if util.safehasattr(repo, 'dirstate'):
639 if util.safehasattr(repo, 'dirstate'):
640 # We don't work with subrepos either. Note that we can get passed in
640 # We don't work with subrepos either. Note that we can get passed in
641 # e.g. a statichttprepo, which throws on trying to access the substate.
641 # e.g. a statichttprepo, which throws on trying to access the substate.
642 # XXX This sucks.
642 # XXX This sucks.
643 try:
643 try:
644 # if repo[None].substate can cause a dirstate parse, which is too
644 # if repo[None].substate can cause a dirstate parse, which is too
645 # slow. Instead, look for a file called hgsubstate,
645 # slow. Instead, look for a file called hgsubstate,
646 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
646 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
647 return
647 return
648 except AttributeError:
648 except AttributeError:
649 return
649 return
650
650
651 fsmonitorstate = state.state(repo)
651 fsmonitorstate = state.state(repo)
652 if fsmonitorstate.mode == 'off':
652 if fsmonitorstate.mode == 'off':
653 return
653 return
654
654
655 try:
655 try:
656 client = watchmanclient.client(repo)
656 client = watchmanclient.client(repo)
657 except Exception as ex:
657 except Exception as ex:
658 _handleunavailable(ui, fsmonitorstate, ex)
658 _handleunavailable(ui, fsmonitorstate, ex)
659 return
659 return
660
660
661 repo._fsmonitorstate = fsmonitorstate
661 repo._fsmonitorstate = fsmonitorstate
662 repo._watchmanclient = client
662 repo._watchmanclient = client
663
663
664 # at this point since fsmonitorstate wasn't present, repo.dirstate is
664 # at this point since fsmonitorstate wasn't present, repo.dirstate is
665 # not a fsmonitordirstate
665 # not a fsmonitordirstate
666 repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__)
666 repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__)
667 # nuke the dirstate so that _fsmonitorinit and subsequent configuration
667 # nuke the dirstate so that _fsmonitorinit and subsequent configuration
668 # changes take effect on it
668 # changes take effect on it
669 del repo._filecache['dirstate']
669 del repo._filecache['dirstate']
670 delattr(repo.unfiltered(), 'dirstate')
670 delattr(repo.unfiltered(), 'dirstate')
671
671
672 class fsmonitorrepo(repo.__class__):
672 class fsmonitorrepo(repo.__class__):
673 def status(self, *args, **kwargs):
673 def status(self, *args, **kwargs):
674 orig = super(fsmonitorrepo, self).status
674 orig = super(fsmonitorrepo, self).status
675 return overridestatus(orig, self, *args, **kwargs)
675 return overridestatus(orig, self, *args, **kwargs)
676
676
677 repo.__class__ = fsmonitorrepo
677 repo.__class__ = fsmonitorrepo
678
678
679 def wrapfilecache(cls, propname, wrapper):
679 def wrapfilecache(cls, propname, wrapper):
680 """Wraps a filecache property. These can't be wrapped using the normal
680 """Wraps a filecache property. These can't be wrapped using the normal
681 wrapfunction. This should eventually go into upstream Mercurial.
681 wrapfunction. This should eventually go into upstream Mercurial.
682 """
682 """
683 assert callable(wrapper)
683 assert callable(wrapper)
684 for currcls in cls.__mro__:
684 for currcls in cls.__mro__:
685 if propname in currcls.__dict__:
685 if propname in currcls.__dict__:
686 origfn = currcls.__dict__[propname].func
686 origfn = currcls.__dict__[propname].func
687 assert callable(origfn)
687 assert callable(origfn)
688 def wrap(*args, **kwargs):
688 def wrap(*args, **kwargs):
689 return wrapper(origfn, *args, **kwargs)
689 return wrapper(origfn, *args, **kwargs)
690 currcls.__dict__[propname].func = wrap
690 currcls.__dict__[propname].func = wrap
691 break
691 break
692
692
693 if currcls is object:
693 if currcls is object:
694 raise AttributeError(
694 raise AttributeError(
695 _("type '%s' has no property '%s'") % (cls, propname))
695 _("type '%s' has no property '%s'") % (cls, propname))
@@ -1,318 +1,318 b''
1 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
1 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 '''commands to sign and verify changesets'''
6 '''commands to sign and verify changesets'''
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import binascii
10 import binascii
11 import os
11 import os
12 import tempfile
12 import tempfile
13
13
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial import (
15 from mercurial import (
16 cmdutil,
16 cmdutil,
17 commands,
17 commands,
18 error,
18 error,
19 match,
19 match,
20 node as hgnode,
20 node as hgnode,
21 util,
21 util,
22 )
22 )
23
23
24 cmdtable = {}
24 cmdtable = {}
25 command = cmdutil.command(cmdtable)
25 command = cmdutil.command(cmdtable)
26 # Note for extension authors: ONLY specify testedwith = 'internal' for
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
28 # be specifying the version(s) of Mercurial they are tested with, or
28 # be specifying the version(s) of Mercurial they are tested with, or
29 # leave the attribute unspecified.
29 # leave the attribute unspecified.
30 testedwith = 'internal'
30 testedwith = 'ships-with-hg-core'
31
31
32 class gpg(object):
32 class gpg(object):
33 def __init__(self, path, key=None):
33 def __init__(self, path, key=None):
34 self.path = path
34 self.path = path
35 self.key = (key and " --local-user \"%s\"" % key) or ""
35 self.key = (key and " --local-user \"%s\"" % key) or ""
36
36
37 def sign(self, data):
37 def sign(self, data):
38 gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
38 gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
39 return util.filter(data, gpgcmd)
39 return util.filter(data, gpgcmd)
40
40
41 def verify(self, data, sig):
41 def verify(self, data, sig):
42 """ returns of the good and bad signatures"""
42 """ returns of the good and bad signatures"""
43 sigfile = datafile = None
43 sigfile = datafile = None
44 try:
44 try:
45 # create temporary files
45 # create temporary files
46 fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
46 fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
47 fp = os.fdopen(fd, 'wb')
47 fp = os.fdopen(fd, 'wb')
48 fp.write(sig)
48 fp.write(sig)
49 fp.close()
49 fp.close()
50 fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
50 fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
51 fp = os.fdopen(fd, 'wb')
51 fp = os.fdopen(fd, 'wb')
52 fp.write(data)
52 fp.write(data)
53 fp.close()
53 fp.close()
54 gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
54 gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
55 "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
55 "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
56 ret = util.filter("", gpgcmd)
56 ret = util.filter("", gpgcmd)
57 finally:
57 finally:
58 for f in (sigfile, datafile):
58 for f in (sigfile, datafile):
59 try:
59 try:
60 if f:
60 if f:
61 os.unlink(f)
61 os.unlink(f)
62 except OSError:
62 except OSError:
63 pass
63 pass
64 keys = []
64 keys = []
65 key, fingerprint = None, None
65 key, fingerprint = None, None
66 for l in ret.splitlines():
66 for l in ret.splitlines():
67 # see DETAILS in the gnupg documentation
67 # see DETAILS in the gnupg documentation
68 # filter the logger output
68 # filter the logger output
69 if not l.startswith("[GNUPG:]"):
69 if not l.startswith("[GNUPG:]"):
70 continue
70 continue
71 l = l[9:]
71 l = l[9:]
72 if l.startswith("VALIDSIG"):
72 if l.startswith("VALIDSIG"):
73 # fingerprint of the primary key
73 # fingerprint of the primary key
74 fingerprint = l.split()[10]
74 fingerprint = l.split()[10]
75 elif l.startswith("ERRSIG"):
75 elif l.startswith("ERRSIG"):
76 key = l.split(" ", 3)[:2]
76 key = l.split(" ", 3)[:2]
77 key.append("")
77 key.append("")
78 fingerprint = None
78 fingerprint = None
79 elif (l.startswith("GOODSIG") or
79 elif (l.startswith("GOODSIG") or
80 l.startswith("EXPSIG") or
80 l.startswith("EXPSIG") or
81 l.startswith("EXPKEYSIG") or
81 l.startswith("EXPKEYSIG") or
82 l.startswith("BADSIG")):
82 l.startswith("BADSIG")):
83 if key is not None:
83 if key is not None:
84 keys.append(key + [fingerprint])
84 keys.append(key + [fingerprint])
85 key = l.split(" ", 2)
85 key = l.split(" ", 2)
86 fingerprint = None
86 fingerprint = None
87 if key is not None:
87 if key is not None:
88 keys.append(key + [fingerprint])
88 keys.append(key + [fingerprint])
89 return keys
89 return keys
90
90
91 def newgpg(ui, **opts):
91 def newgpg(ui, **opts):
92 """create a new gpg instance"""
92 """create a new gpg instance"""
93 gpgpath = ui.config("gpg", "cmd", "gpg")
93 gpgpath = ui.config("gpg", "cmd", "gpg")
94 gpgkey = opts.get('key')
94 gpgkey = opts.get('key')
95 if not gpgkey:
95 if not gpgkey:
96 gpgkey = ui.config("gpg", "key", None)
96 gpgkey = ui.config("gpg", "key", None)
97 return gpg(gpgpath, gpgkey)
97 return gpg(gpgpath, gpgkey)
98
98
99 def sigwalk(repo):
99 def sigwalk(repo):
100 """
100 """
101 walk over every sigs, yields a couple
101 walk over every sigs, yields a couple
102 ((node, version, sig), (filename, linenumber))
102 ((node, version, sig), (filename, linenumber))
103 """
103 """
104 def parsefile(fileiter, context):
104 def parsefile(fileiter, context):
105 ln = 1
105 ln = 1
106 for l in fileiter:
106 for l in fileiter:
107 if not l:
107 if not l:
108 continue
108 continue
109 yield (l.split(" ", 2), (context, ln))
109 yield (l.split(" ", 2), (context, ln))
110 ln += 1
110 ln += 1
111
111
112 # read the heads
112 # read the heads
113 fl = repo.file(".hgsigs")
113 fl = repo.file(".hgsigs")
114 for r in reversed(fl.heads()):
114 for r in reversed(fl.heads()):
115 fn = ".hgsigs|%s" % hgnode.short(r)
115 fn = ".hgsigs|%s" % hgnode.short(r)
116 for item in parsefile(fl.read(r).splitlines(), fn):
116 for item in parsefile(fl.read(r).splitlines(), fn):
117 yield item
117 yield item
118 try:
118 try:
119 # read local signatures
119 # read local signatures
120 fn = "localsigs"
120 fn = "localsigs"
121 for item in parsefile(repo.vfs(fn), fn):
121 for item in parsefile(repo.vfs(fn), fn):
122 yield item
122 yield item
123 except IOError:
123 except IOError:
124 pass
124 pass
125
125
126 def getkeys(ui, repo, mygpg, sigdata, context):
126 def getkeys(ui, repo, mygpg, sigdata, context):
127 """get the keys who signed a data"""
127 """get the keys who signed a data"""
128 fn, ln = context
128 fn, ln = context
129 node, version, sig = sigdata
129 node, version, sig = sigdata
130 prefix = "%s:%d" % (fn, ln)
130 prefix = "%s:%d" % (fn, ln)
131 node = hgnode.bin(node)
131 node = hgnode.bin(node)
132
132
133 data = node2txt(repo, node, version)
133 data = node2txt(repo, node, version)
134 sig = binascii.a2b_base64(sig)
134 sig = binascii.a2b_base64(sig)
135 keys = mygpg.verify(data, sig)
135 keys = mygpg.verify(data, sig)
136
136
137 validkeys = []
137 validkeys = []
138 # warn for expired key and/or sigs
138 # warn for expired key and/or sigs
139 for key in keys:
139 for key in keys:
140 if key[0] == "ERRSIG":
140 if key[0] == "ERRSIG":
141 ui.write(_("%s Unknown key ID \"%s\"\n")
141 ui.write(_("%s Unknown key ID \"%s\"\n")
142 % (prefix, shortkey(ui, key[1][:15])))
142 % (prefix, shortkey(ui, key[1][:15])))
143 continue
143 continue
144 if key[0] == "BADSIG":
144 if key[0] == "BADSIG":
145 ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
145 ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
146 continue
146 continue
147 if key[0] == "EXPSIG":
147 if key[0] == "EXPSIG":
148 ui.write(_("%s Note: Signature has expired"
148 ui.write(_("%s Note: Signature has expired"
149 " (signed by: \"%s\")\n") % (prefix, key[2]))
149 " (signed by: \"%s\")\n") % (prefix, key[2]))
150 elif key[0] == "EXPKEYSIG":
150 elif key[0] == "EXPKEYSIG":
151 ui.write(_("%s Note: This key has expired"
151 ui.write(_("%s Note: This key has expired"
152 " (signed by: \"%s\")\n") % (prefix, key[2]))
152 " (signed by: \"%s\")\n") % (prefix, key[2]))
153 validkeys.append((key[1], key[2], key[3]))
153 validkeys.append((key[1], key[2], key[3]))
154 return validkeys
154 return validkeys
155
155
156 @command("sigs", [], _('hg sigs'))
156 @command("sigs", [], _('hg sigs'))
157 def sigs(ui, repo):
157 def sigs(ui, repo):
158 """list signed changesets"""
158 """list signed changesets"""
159 mygpg = newgpg(ui)
159 mygpg = newgpg(ui)
160 revs = {}
160 revs = {}
161
161
162 for data, context in sigwalk(repo):
162 for data, context in sigwalk(repo):
163 node, version, sig = data
163 node, version, sig = data
164 fn, ln = context
164 fn, ln = context
165 try:
165 try:
166 n = repo.lookup(node)
166 n = repo.lookup(node)
167 except KeyError:
167 except KeyError:
168 ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
168 ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
169 continue
169 continue
170 r = repo.changelog.rev(n)
170 r = repo.changelog.rev(n)
171 keys = getkeys(ui, repo, mygpg, data, context)
171 keys = getkeys(ui, repo, mygpg, data, context)
172 if not keys:
172 if not keys:
173 continue
173 continue
174 revs.setdefault(r, [])
174 revs.setdefault(r, [])
175 revs[r].extend(keys)
175 revs[r].extend(keys)
176 for rev in sorted(revs, reverse=True):
176 for rev in sorted(revs, reverse=True):
177 for k in revs[rev]:
177 for k in revs[rev]:
178 r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
178 r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
179 ui.write("%-30s %s\n" % (keystr(ui, k), r))
179 ui.write("%-30s %s\n" % (keystr(ui, k), r))
180
180
181 @command("sigcheck", [], _('hg sigcheck REV'))
181 @command("sigcheck", [], _('hg sigcheck REV'))
182 def sigcheck(ui, repo, rev):
182 def sigcheck(ui, repo, rev):
183 """verify all the signatures there may be for a particular revision"""
183 """verify all the signatures there may be for a particular revision"""
184 mygpg = newgpg(ui)
184 mygpg = newgpg(ui)
185 rev = repo.lookup(rev)
185 rev = repo.lookup(rev)
186 hexrev = hgnode.hex(rev)
186 hexrev = hgnode.hex(rev)
187 keys = []
187 keys = []
188
188
189 for data, context in sigwalk(repo):
189 for data, context in sigwalk(repo):
190 node, version, sig = data
190 node, version, sig = data
191 if node == hexrev:
191 if node == hexrev:
192 k = getkeys(ui, repo, mygpg, data, context)
192 k = getkeys(ui, repo, mygpg, data, context)
193 if k:
193 if k:
194 keys.extend(k)
194 keys.extend(k)
195
195
196 if not keys:
196 if not keys:
197 ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
197 ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
198 return
198 return
199
199
200 # print summary
200 # print summary
201 ui.write(_("%s is signed by:\n") % hgnode.short(rev))
201 ui.write(_("%s is signed by:\n") % hgnode.short(rev))
202 for key in keys:
202 for key in keys:
203 ui.write(" %s\n" % keystr(ui, key))
203 ui.write(" %s\n" % keystr(ui, key))
204
204
205 def keystr(ui, key):
205 def keystr(ui, key):
206 """associate a string to a key (username, comment)"""
206 """associate a string to a key (username, comment)"""
207 keyid, user, fingerprint = key
207 keyid, user, fingerprint = key
208 comment = ui.config("gpg", fingerprint, None)
208 comment = ui.config("gpg", fingerprint, None)
209 if comment:
209 if comment:
210 return "%s (%s)" % (user, comment)
210 return "%s (%s)" % (user, comment)
211 else:
211 else:
212 return user
212 return user
213
213
214 @command("sign",
214 @command("sign",
215 [('l', 'local', None, _('make the signature local')),
215 [('l', 'local', None, _('make the signature local')),
216 ('f', 'force', None, _('sign even if the sigfile is modified')),
216 ('f', 'force', None, _('sign even if the sigfile is modified')),
217 ('', 'no-commit', None, _('do not commit the sigfile after signing')),
217 ('', 'no-commit', None, _('do not commit the sigfile after signing')),
218 ('k', 'key', '',
218 ('k', 'key', '',
219 _('the key id to sign with'), _('ID')),
219 _('the key id to sign with'), _('ID')),
220 ('m', 'message', '',
220 ('m', 'message', '',
221 _('use text as commit message'), _('TEXT')),
221 _('use text as commit message'), _('TEXT')),
222 ('e', 'edit', False, _('invoke editor on commit messages')),
222 ('e', 'edit', False, _('invoke editor on commit messages')),
223 ] + commands.commitopts2,
223 ] + commands.commitopts2,
224 _('hg sign [OPTION]... [REV]...'))
224 _('hg sign [OPTION]... [REV]...'))
225 def sign(ui, repo, *revs, **opts):
225 def sign(ui, repo, *revs, **opts):
226 """add a signature for the current or given revision
226 """add a signature for the current or given revision
227
227
228 If no revision is given, the parent of the working directory is used,
228 If no revision is given, the parent of the working directory is used,
229 or tip if no revision is checked out.
229 or tip if no revision is checked out.
230
230
231 The ``gpg.cmd`` config setting can be used to specify the command
231 The ``gpg.cmd`` config setting can be used to specify the command
232 to run. A default key can be specified with ``gpg.key``.
232 to run. A default key can be specified with ``gpg.key``.
233
233
234 See :hg:`help dates` for a list of formats valid for -d/--date.
234 See :hg:`help dates` for a list of formats valid for -d/--date.
235 """
235 """
236 with repo.wlock():
236 with repo.wlock():
237 return _dosign(ui, repo, *revs, **opts)
237 return _dosign(ui, repo, *revs, **opts)
238
238
239 def _dosign(ui, repo, *revs, **opts):
239 def _dosign(ui, repo, *revs, **opts):
240 mygpg = newgpg(ui, **opts)
240 mygpg = newgpg(ui, **opts)
241 sigver = "0"
241 sigver = "0"
242 sigmessage = ""
242 sigmessage = ""
243
243
244 date = opts.get('date')
244 date = opts.get('date')
245 if date:
245 if date:
246 opts['date'] = util.parsedate(date)
246 opts['date'] = util.parsedate(date)
247
247
248 if revs:
248 if revs:
249 nodes = [repo.lookup(n) for n in revs]
249 nodes = [repo.lookup(n) for n in revs]
250 else:
250 else:
251 nodes = [node for node in repo.dirstate.parents()
251 nodes = [node for node in repo.dirstate.parents()
252 if node != hgnode.nullid]
252 if node != hgnode.nullid]
253 if len(nodes) > 1:
253 if len(nodes) > 1:
254 raise error.Abort(_('uncommitted merge - please provide a '
254 raise error.Abort(_('uncommitted merge - please provide a '
255 'specific revision'))
255 'specific revision'))
256 if not nodes:
256 if not nodes:
257 nodes = [repo.changelog.tip()]
257 nodes = [repo.changelog.tip()]
258
258
259 for n in nodes:
259 for n in nodes:
260 hexnode = hgnode.hex(n)
260 hexnode = hgnode.hex(n)
261 ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
261 ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
262 hgnode.short(n)))
262 hgnode.short(n)))
263 # build data
263 # build data
264 data = node2txt(repo, n, sigver)
264 data = node2txt(repo, n, sigver)
265 sig = mygpg.sign(data)
265 sig = mygpg.sign(data)
266 if not sig:
266 if not sig:
267 raise error.Abort(_("error while signing"))
267 raise error.Abort(_("error while signing"))
268 sig = binascii.b2a_base64(sig)
268 sig = binascii.b2a_base64(sig)
269 sig = sig.replace("\n", "")
269 sig = sig.replace("\n", "")
270 sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
270 sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
271
271
272 # write it
272 # write it
273 if opts['local']:
273 if opts['local']:
274 repo.vfs.append("localsigs", sigmessage)
274 repo.vfs.append("localsigs", sigmessage)
275 return
275 return
276
276
277 if not opts["force"]:
277 if not opts["force"]:
278 msigs = match.exact(repo.root, '', ['.hgsigs'])
278 msigs = match.exact(repo.root, '', ['.hgsigs'])
279 if any(repo.status(match=msigs, unknown=True, ignored=True)):
279 if any(repo.status(match=msigs, unknown=True, ignored=True)):
280 raise error.Abort(_("working copy of .hgsigs is changed "),
280 raise error.Abort(_("working copy of .hgsigs is changed "),
281 hint=_("please commit .hgsigs manually"))
281 hint=_("please commit .hgsigs manually"))
282
282
283 sigsfile = repo.wfile(".hgsigs", "ab")
283 sigsfile = repo.wfile(".hgsigs", "ab")
284 sigsfile.write(sigmessage)
284 sigsfile.write(sigmessage)
285 sigsfile.close()
285 sigsfile.close()
286
286
287 if '.hgsigs' not in repo.dirstate:
287 if '.hgsigs' not in repo.dirstate:
288 repo[None].add([".hgsigs"])
288 repo[None].add([".hgsigs"])
289
289
290 if opts["no_commit"]:
290 if opts["no_commit"]:
291 return
291 return
292
292
293 message = opts['message']
293 message = opts['message']
294 if not message:
294 if not message:
295 # we don't translate commit messages
295 # we don't translate commit messages
296 message = "\n".join(["Added signature for changeset %s"
296 message = "\n".join(["Added signature for changeset %s"
297 % hgnode.short(n)
297 % hgnode.short(n)
298 for n in nodes])
298 for n in nodes])
299 try:
299 try:
300 editor = cmdutil.getcommiteditor(editform='gpg.sign', **opts)
300 editor = cmdutil.getcommiteditor(editform='gpg.sign', **opts)
301 repo.commit(message, opts['user'], opts['date'], match=msigs,
301 repo.commit(message, opts['user'], opts['date'], match=msigs,
302 editor=editor)
302 editor=editor)
303 except ValueError as inst:
303 except ValueError as inst:
304 raise error.Abort(str(inst))
304 raise error.Abort(str(inst))
305
305
306 def shortkey(ui, key):
306 def shortkey(ui, key):
307 if len(key) != 16:
307 if len(key) != 16:
308 ui.debug("key ID \"%s\" format error\n" % key)
308 ui.debug("key ID \"%s\" format error\n" % key)
309 return key
309 return key
310
310
311 return key[-8:]
311 return key[-8:]
312
312
313 def node2txt(repo, node, ver):
313 def node2txt(repo, node, ver):
314 """map a manifest into some text"""
314 """map a manifest into some text"""
315 if ver == "0":
315 if ver == "0":
316 return "%s\n" % hgnode.hex(node)
316 return "%s\n" % hgnode.hex(node)
317 else:
317 else:
318 raise error.Abort(_("unknown signature version"))
318 raise error.Abort(_("unknown signature version"))
@@ -1,69 +1,69 b''
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to view revision graphs from a shell (DEPRECATED)
8 '''command to view revision graphs from a shell (DEPRECATED)
9
9
10 The functionality of this extension has been include in core Mercurial
10 The functionality of this extension has been include in core Mercurial
11 since version 2.3. Please use :hg:`log -G ...` instead.
11 since version 2.3. Please use :hg:`log -G ...` instead.
12
12
13 This extension adds a --graph option to the incoming, outgoing and log
13 This extension adds a --graph option to the incoming, outgoing and log
14 commands. When this options is given, an ASCII representation of the
14 commands. When this options is given, an ASCII representation of the
15 revision graph is also shown.
15 revision graph is also shown.
16 '''
16 '''
17
17
18 from __future__ import absolute_import
18 from __future__ import absolute_import
19
19
20 from mercurial.i18n import _
20 from mercurial.i18n import _
21 from mercurial import (
21 from mercurial import (
22 cmdutil,
22 cmdutil,
23 commands,
23 commands,
24 )
24 )
25
25
26 cmdtable = {}
26 cmdtable = {}
27 command = cmdutil.command(cmdtable)
27 command = cmdutil.command(cmdtable)
28 # Note for extension authors: ONLY specify testedwith = 'internal' for
28 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
29 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
29 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
30 # be specifying the version(s) of Mercurial they are tested with, or
30 # be specifying the version(s) of Mercurial they are tested with, or
31 # leave the attribute unspecified.
31 # leave the attribute unspecified.
32 testedwith = 'internal'
32 testedwith = 'ships-with-hg-core'
33
33
34 @command('glog',
34 @command('glog',
35 [('f', 'follow', None,
35 [('f', 'follow', None,
36 _('follow changeset history, or file history across copies and renames')),
36 _('follow changeset history, or file history across copies and renames')),
37 ('', 'follow-first', None,
37 ('', 'follow-first', None,
38 _('only follow the first parent of merge changesets (DEPRECATED)')),
38 _('only follow the first parent of merge changesets (DEPRECATED)')),
39 ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
39 ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
40 ('C', 'copies', None, _('show copied files')),
40 ('C', 'copies', None, _('show copied files')),
41 ('k', 'keyword', [],
41 ('k', 'keyword', [],
42 _('do case-insensitive search for a given text'), _('TEXT')),
42 _('do case-insensitive search for a given text'), _('TEXT')),
43 ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
43 ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
44 ('', 'removed', None, _('include revisions where files were removed')),
44 ('', 'removed', None, _('include revisions where files were removed')),
45 ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
45 ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
46 ('u', 'user', [], _('revisions committed by user'), _('USER')),
46 ('u', 'user', [], _('revisions committed by user'), _('USER')),
47 ('', 'only-branch', [],
47 ('', 'only-branch', [],
48 _('show only changesets within the given named branch (DEPRECATED)'),
48 _('show only changesets within the given named branch (DEPRECATED)'),
49 _('BRANCH')),
49 _('BRANCH')),
50 ('b', 'branch', [],
50 ('b', 'branch', [],
51 _('show changesets within the given named branch'), _('BRANCH')),
51 _('show changesets within the given named branch'), _('BRANCH')),
52 ('P', 'prune', [],
52 ('P', 'prune', [],
53 _('do not display revision or any of its ancestors'), _('REV')),
53 _('do not display revision or any of its ancestors'), _('REV')),
54 ] + commands.logopts + commands.walkopts,
54 ] + commands.logopts + commands.walkopts,
55 _('[OPTION]... [FILE]'),
55 _('[OPTION]... [FILE]'),
56 inferrepo=True)
56 inferrepo=True)
57 def glog(ui, repo, *pats, **opts):
57 def glog(ui, repo, *pats, **opts):
58 """show revision history alongside an ASCII revision graph
58 """show revision history alongside an ASCII revision graph
59
59
60 Print a revision history alongside a revision graph drawn with
60 Print a revision history alongside a revision graph drawn with
61 ASCII characters.
61 ASCII characters.
62
62
63 Nodes printed as an @ character are parents of the working
63 Nodes printed as an @ character are parents of the working
64 directory.
64 directory.
65
65
66 This is an alias to :hg:`log -G`.
66 This is an alias to :hg:`log -G`.
67 """
67 """
68 opts['graph'] = True
68 opts['graph'] = True
69 return commands.log(ui, repo, *pats, **opts)
69 return commands.log(ui, repo, *pats, **opts)
@@ -1,348 +1,348 b''
1 # Minimal support for git commands on an hg repository
1 # Minimal support for git commands on an hg repository
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''browse the repository in a graphical way
8 '''browse the repository in a graphical way
9
9
10 The hgk extension allows browsing the history of a repository in a
10 The hgk extension allows browsing the history of a repository in a
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 distributed with Mercurial.)
12 distributed with Mercurial.)
13
13
14 hgk consists of two parts: a Tcl script that does the displaying and
14 hgk consists of two parts: a Tcl script that does the displaying and
15 querying of information, and an extension to Mercurial named hgk.py,
15 querying of information, and an extension to Mercurial named hgk.py,
16 which provides hooks for hgk to get information. hgk can be found in
16 which provides hooks for hgk to get information. hgk can be found in
17 the contrib directory, and the extension is shipped in the hgext
17 the contrib directory, and the extension is shipped in the hgext
18 repository, and needs to be enabled.
18 repository, and needs to be enabled.
19
19
20 The :hg:`view` command will launch the hgk Tcl script. For this command
20 The :hg:`view` command will launch the hgk Tcl script. For this command
21 to work, hgk must be in your search path. Alternately, you can specify
21 to work, hgk must be in your search path. Alternately, you can specify
22 the path to hgk in your configuration file::
22 the path to hgk in your configuration file::
23
23
24 [hgk]
24 [hgk]
25 path = /location/of/hgk
25 path = /location/of/hgk
26
26
27 hgk can make use of the extdiff extension to visualize revisions.
27 hgk can make use of the extdiff extension to visualize revisions.
28 Assuming you had already configured extdiff vdiff command, just add::
28 Assuming you had already configured extdiff vdiff command, just add::
29
29
30 [hgk]
30 [hgk]
31 vdiff=vdiff
31 vdiff=vdiff
32
32
33 Revisions context menu will now display additional entries to fire
33 Revisions context menu will now display additional entries to fire
34 vdiff on hovered and selected revisions.
34 vdiff on hovered and selected revisions.
35 '''
35 '''
36
36
37 from __future__ import absolute_import
37 from __future__ import absolute_import
38
38
39 import os
39 import os
40
40
41 from mercurial.i18n import _
41 from mercurial.i18n import _
42 from mercurial.node import (
42 from mercurial.node import (
43 nullid,
43 nullid,
44 nullrev,
44 nullrev,
45 short,
45 short,
46 )
46 )
47 from mercurial import (
47 from mercurial import (
48 cmdutil,
48 cmdutil,
49 commands,
49 commands,
50 obsolete,
50 obsolete,
51 patch,
51 patch,
52 scmutil,
52 scmutil,
53 )
53 )
54
54
55 cmdtable = {}
55 cmdtable = {}
56 command = cmdutil.command(cmdtable)
56 command = cmdutil.command(cmdtable)
57 # Note for extension authors: ONLY specify testedwith = 'internal' for
57 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
58 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
58 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
59 # be specifying the version(s) of Mercurial they are tested with, or
59 # be specifying the version(s) of Mercurial they are tested with, or
60 # leave the attribute unspecified.
60 # leave the attribute unspecified.
61 testedwith = 'internal'
61 testedwith = 'ships-with-hg-core'
62
62
63 @command('debug-diff-tree',
63 @command('debug-diff-tree',
64 [('p', 'patch', None, _('generate patch')),
64 [('p', 'patch', None, _('generate patch')),
65 ('r', 'recursive', None, _('recursive')),
65 ('r', 'recursive', None, _('recursive')),
66 ('P', 'pretty', None, _('pretty')),
66 ('P', 'pretty', None, _('pretty')),
67 ('s', 'stdin', None, _('stdin')),
67 ('s', 'stdin', None, _('stdin')),
68 ('C', 'copy', None, _('detect copies')),
68 ('C', 'copy', None, _('detect copies')),
69 ('S', 'search', "", _('search'))],
69 ('S', 'search', "", _('search'))],
70 ('[OPTION]... NODE1 NODE2 [FILE]...'),
70 ('[OPTION]... NODE1 NODE2 [FILE]...'),
71 inferrepo=True)
71 inferrepo=True)
72 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
72 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
73 """diff trees from two commits"""
73 """diff trees from two commits"""
74 def __difftree(repo, node1, node2, files=[]):
74 def __difftree(repo, node1, node2, files=[]):
75 assert node2 is not None
75 assert node2 is not None
76 mmap = repo[node1].manifest()
76 mmap = repo[node1].manifest()
77 mmap2 = repo[node2].manifest()
77 mmap2 = repo[node2].manifest()
78 m = scmutil.match(repo[node1], files)
78 m = scmutil.match(repo[node1], files)
79 modified, added, removed = repo.status(node1, node2, m)[:3]
79 modified, added, removed = repo.status(node1, node2, m)[:3]
80 empty = short(nullid)
80 empty = short(nullid)
81
81
82 for f in modified:
82 for f in modified:
83 # TODO get file permissions
83 # TODO get file permissions
84 ui.write((":100664 100664 %s %s M\t%s\t%s\n") %
84 ui.write((":100664 100664 %s %s M\t%s\t%s\n") %
85 (short(mmap[f]), short(mmap2[f]), f, f))
85 (short(mmap[f]), short(mmap2[f]), f, f))
86 for f in added:
86 for f in added:
87 ui.write((":000000 100664 %s %s N\t%s\t%s\n") %
87 ui.write((":000000 100664 %s %s N\t%s\t%s\n") %
88 (empty, short(mmap2[f]), f, f))
88 (empty, short(mmap2[f]), f, f))
89 for f in removed:
89 for f in removed:
90 ui.write((":100664 000000 %s %s D\t%s\t%s\n") %
90 ui.write((":100664 000000 %s %s D\t%s\t%s\n") %
91 (short(mmap[f]), empty, f, f))
91 (short(mmap[f]), empty, f, f))
92 ##
92 ##
93
93
94 while True:
94 while True:
95 if opts['stdin']:
95 if opts['stdin']:
96 try:
96 try:
97 line = raw_input().split(' ')
97 line = raw_input().split(' ')
98 node1 = line[0]
98 node1 = line[0]
99 if len(line) > 1:
99 if len(line) > 1:
100 node2 = line[1]
100 node2 = line[1]
101 else:
101 else:
102 node2 = None
102 node2 = None
103 except EOFError:
103 except EOFError:
104 break
104 break
105 node1 = repo.lookup(node1)
105 node1 = repo.lookup(node1)
106 if node2:
106 if node2:
107 node2 = repo.lookup(node2)
107 node2 = repo.lookup(node2)
108 else:
108 else:
109 node2 = node1
109 node2 = node1
110 node1 = repo.changelog.parents(node1)[0]
110 node1 = repo.changelog.parents(node1)[0]
111 if opts['patch']:
111 if opts['patch']:
112 if opts['pretty']:
112 if opts['pretty']:
113 catcommit(ui, repo, node2, "")
113 catcommit(ui, repo, node2, "")
114 m = scmutil.match(repo[node1], files)
114 m = scmutil.match(repo[node1], files)
115 diffopts = patch.difffeatureopts(ui)
115 diffopts = patch.difffeatureopts(ui)
116 diffopts.git = True
116 diffopts.git = True
117 chunks = patch.diff(repo, node1, node2, match=m,
117 chunks = patch.diff(repo, node1, node2, match=m,
118 opts=diffopts)
118 opts=diffopts)
119 for chunk in chunks:
119 for chunk in chunks:
120 ui.write(chunk)
120 ui.write(chunk)
121 else:
121 else:
122 __difftree(repo, node1, node2, files=files)
122 __difftree(repo, node1, node2, files=files)
123 if not opts['stdin']:
123 if not opts['stdin']:
124 break
124 break
125
125
126 def catcommit(ui, repo, n, prefix, ctx=None):
126 def catcommit(ui, repo, n, prefix, ctx=None):
127 nlprefix = '\n' + prefix
127 nlprefix = '\n' + prefix
128 if ctx is None:
128 if ctx is None:
129 ctx = repo[n]
129 ctx = repo[n]
130 # use ctx.node() instead ??
130 # use ctx.node() instead ??
131 ui.write(("tree %s\n" % short(ctx.changeset()[0])))
131 ui.write(("tree %s\n" % short(ctx.changeset()[0])))
132 for p in ctx.parents():
132 for p in ctx.parents():
133 ui.write(("parent %s\n" % p))
133 ui.write(("parent %s\n" % p))
134
134
135 date = ctx.date()
135 date = ctx.date()
136 description = ctx.description().replace("\0", "")
136 description = ctx.description().replace("\0", "")
137 ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
137 ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
138
138
139 if 'committer' in ctx.extra():
139 if 'committer' in ctx.extra():
140 ui.write(("committer %s\n" % ctx.extra()['committer']))
140 ui.write(("committer %s\n" % ctx.extra()['committer']))
141
141
142 ui.write(("revision %d\n" % ctx.rev()))
142 ui.write(("revision %d\n" % ctx.rev()))
143 ui.write(("branch %s\n" % ctx.branch()))
143 ui.write(("branch %s\n" % ctx.branch()))
144 if obsolete.isenabled(repo, obsolete.createmarkersopt):
144 if obsolete.isenabled(repo, obsolete.createmarkersopt):
145 if ctx.obsolete():
145 if ctx.obsolete():
146 ui.write(("obsolete\n"))
146 ui.write(("obsolete\n"))
147 ui.write(("phase %s\n\n" % ctx.phasestr()))
147 ui.write(("phase %s\n\n" % ctx.phasestr()))
148
148
149 if prefix != "":
149 if prefix != "":
150 ui.write("%s%s\n" % (prefix,
150 ui.write("%s%s\n" % (prefix,
151 description.replace('\n', nlprefix).strip()))
151 description.replace('\n', nlprefix).strip()))
152 else:
152 else:
153 ui.write(description + "\n")
153 ui.write(description + "\n")
154 if prefix:
154 if prefix:
155 ui.write('\0')
155 ui.write('\0')
156
156
157 @command('debug-merge-base', [], _('REV REV'))
157 @command('debug-merge-base', [], _('REV REV'))
158 def base(ui, repo, node1, node2):
158 def base(ui, repo, node1, node2):
159 """output common ancestor information"""
159 """output common ancestor information"""
160 node1 = repo.lookup(node1)
160 node1 = repo.lookup(node1)
161 node2 = repo.lookup(node2)
161 node2 = repo.lookup(node2)
162 n = repo.changelog.ancestor(node1, node2)
162 n = repo.changelog.ancestor(node1, node2)
163 ui.write(short(n) + "\n")
163 ui.write(short(n) + "\n")
164
164
165 @command('debug-cat-file',
165 @command('debug-cat-file',
166 [('s', 'stdin', None, _('stdin'))],
166 [('s', 'stdin', None, _('stdin'))],
167 _('[OPTION]... TYPE FILE'),
167 _('[OPTION]... TYPE FILE'),
168 inferrepo=True)
168 inferrepo=True)
169 def catfile(ui, repo, type=None, r=None, **opts):
169 def catfile(ui, repo, type=None, r=None, **opts):
170 """cat a specific revision"""
170 """cat a specific revision"""
171 # in stdin mode, every line except the commit is prefixed with two
171 # in stdin mode, every line except the commit is prefixed with two
172 # spaces. This way the our caller can find the commit without magic
172 # spaces. This way the our caller can find the commit without magic
173 # strings
173 # strings
174 #
174 #
175 prefix = ""
175 prefix = ""
176 if opts['stdin']:
176 if opts['stdin']:
177 try:
177 try:
178 (type, r) = raw_input().split(' ')
178 (type, r) = raw_input().split(' ')
179 prefix = " "
179 prefix = " "
180 except EOFError:
180 except EOFError:
181 return
181 return
182
182
183 else:
183 else:
184 if not type or not r:
184 if not type or not r:
185 ui.warn(_("cat-file: type or revision not supplied\n"))
185 ui.warn(_("cat-file: type or revision not supplied\n"))
186 commands.help_(ui, 'cat-file')
186 commands.help_(ui, 'cat-file')
187
187
188 while r:
188 while r:
189 if type != "commit":
189 if type != "commit":
190 ui.warn(_("aborting hg cat-file only understands commits\n"))
190 ui.warn(_("aborting hg cat-file only understands commits\n"))
191 return 1
191 return 1
192 n = repo.lookup(r)
192 n = repo.lookup(r)
193 catcommit(ui, repo, n, prefix)
193 catcommit(ui, repo, n, prefix)
194 if opts['stdin']:
194 if opts['stdin']:
195 try:
195 try:
196 (type, r) = raw_input().split(' ')
196 (type, r) = raw_input().split(' ')
197 except EOFError:
197 except EOFError:
198 break
198 break
199 else:
199 else:
200 break
200 break
201
201
202 # git rev-tree is a confusing thing. You can supply a number of
202 # git rev-tree is a confusing thing. You can supply a number of
203 # commit sha1s on the command line, and it walks the commit history
203 # commit sha1s on the command line, and it walks the commit history
204 # telling you which commits are reachable from the supplied ones via
204 # telling you which commits are reachable from the supplied ones via
205 # a bitmask based on arg position.
205 # a bitmask based on arg position.
206 # you can specify a commit to stop at by starting the sha1 with ^
206 # you can specify a commit to stop at by starting the sha1 with ^
207 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
207 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
208 def chlogwalk():
208 def chlogwalk():
209 count = len(repo)
209 count = len(repo)
210 i = count
210 i = count
211 l = [0] * 100
211 l = [0] * 100
212 chunk = 100
212 chunk = 100
213 while True:
213 while True:
214 if chunk > i:
214 if chunk > i:
215 chunk = i
215 chunk = i
216 i = 0
216 i = 0
217 else:
217 else:
218 i -= chunk
218 i -= chunk
219
219
220 for x in xrange(chunk):
220 for x in xrange(chunk):
221 if i + x >= count:
221 if i + x >= count:
222 l[chunk - x:] = [0] * (chunk - x)
222 l[chunk - x:] = [0] * (chunk - x)
223 break
223 break
224 if full is not None:
224 if full is not None:
225 if (i + x) in repo:
225 if (i + x) in repo:
226 l[x] = repo[i + x]
226 l[x] = repo[i + x]
227 l[x].changeset() # force reading
227 l[x].changeset() # force reading
228 else:
228 else:
229 if (i + x) in repo:
229 if (i + x) in repo:
230 l[x] = 1
230 l[x] = 1
231 for x in xrange(chunk - 1, -1, -1):
231 for x in xrange(chunk - 1, -1, -1):
232 if l[x] != 0:
232 if l[x] != 0:
233 yield (i + x, full is not None and l[x] or None)
233 yield (i + x, full is not None and l[x] or None)
234 if i == 0:
234 if i == 0:
235 break
235 break
236
236
237 # calculate and return the reachability bitmask for sha
237 # calculate and return the reachability bitmask for sha
238 def is_reachable(ar, reachable, sha):
238 def is_reachable(ar, reachable, sha):
239 if len(ar) == 0:
239 if len(ar) == 0:
240 return 1
240 return 1
241 mask = 0
241 mask = 0
242 for i in xrange(len(ar)):
242 for i in xrange(len(ar)):
243 if sha in reachable[i]:
243 if sha in reachable[i]:
244 mask |= 1 << i
244 mask |= 1 << i
245
245
246 return mask
246 return mask
247
247
248 reachable = []
248 reachable = []
249 stop_sha1 = []
249 stop_sha1 = []
250 want_sha1 = []
250 want_sha1 = []
251 count = 0
251 count = 0
252
252
253 # figure out which commits they are asking for and which ones they
253 # figure out which commits they are asking for and which ones they
254 # want us to stop on
254 # want us to stop on
255 for i, arg in enumerate(args):
255 for i, arg in enumerate(args):
256 if arg.startswith('^'):
256 if arg.startswith('^'):
257 s = repo.lookup(arg[1:])
257 s = repo.lookup(arg[1:])
258 stop_sha1.append(s)
258 stop_sha1.append(s)
259 want_sha1.append(s)
259 want_sha1.append(s)
260 elif arg != 'HEAD':
260 elif arg != 'HEAD':
261 want_sha1.append(repo.lookup(arg))
261 want_sha1.append(repo.lookup(arg))
262
262
263 # calculate the graph for the supplied commits
263 # calculate the graph for the supplied commits
264 for i, n in enumerate(want_sha1):
264 for i, n in enumerate(want_sha1):
265 reachable.append(set())
265 reachable.append(set())
266 visit = [n]
266 visit = [n]
267 reachable[i].add(n)
267 reachable[i].add(n)
268 while visit:
268 while visit:
269 n = visit.pop(0)
269 n = visit.pop(0)
270 if n in stop_sha1:
270 if n in stop_sha1:
271 continue
271 continue
272 for p in repo.changelog.parents(n):
272 for p in repo.changelog.parents(n):
273 if p not in reachable[i]:
273 if p not in reachable[i]:
274 reachable[i].add(p)
274 reachable[i].add(p)
275 visit.append(p)
275 visit.append(p)
276 if p in stop_sha1:
276 if p in stop_sha1:
277 continue
277 continue
278
278
279 # walk the repository looking for commits that are in our
279 # walk the repository looking for commits that are in our
280 # reachability graph
280 # reachability graph
281 for i, ctx in chlogwalk():
281 for i, ctx in chlogwalk():
282 if i not in repo:
282 if i not in repo:
283 continue
283 continue
284 n = repo.changelog.node(i)
284 n = repo.changelog.node(i)
285 mask = is_reachable(want_sha1, reachable, n)
285 mask = is_reachable(want_sha1, reachable, n)
286 if mask:
286 if mask:
287 parentstr = ""
287 parentstr = ""
288 if parents:
288 if parents:
289 pp = repo.changelog.parents(n)
289 pp = repo.changelog.parents(n)
290 if pp[0] != nullid:
290 if pp[0] != nullid:
291 parentstr += " " + short(pp[0])
291 parentstr += " " + short(pp[0])
292 if pp[1] != nullid:
292 if pp[1] != nullid:
293 parentstr += " " + short(pp[1])
293 parentstr += " " + short(pp[1])
294 if not full:
294 if not full:
295 ui.write("%s%s\n" % (short(n), parentstr))
295 ui.write("%s%s\n" % (short(n), parentstr))
296 elif full == "commit":
296 elif full == "commit":
297 ui.write("%s%s\n" % (short(n), parentstr))
297 ui.write("%s%s\n" % (short(n), parentstr))
298 catcommit(ui, repo, n, ' ', ctx)
298 catcommit(ui, repo, n, ' ', ctx)
299 else:
299 else:
300 (p1, p2) = repo.changelog.parents(n)
300 (p1, p2) = repo.changelog.parents(n)
301 (h, h1, h2) = map(short, (n, p1, p2))
301 (h, h1, h2) = map(short, (n, p1, p2))
302 (i1, i2) = map(repo.changelog.rev, (p1, p2))
302 (i1, i2) = map(repo.changelog.rev, (p1, p2))
303
303
304 date = ctx.date()[0]
304 date = ctx.date()[0]
305 ui.write("%s %s:%s" % (date, h, mask))
305 ui.write("%s %s:%s" % (date, h, mask))
306 mask = is_reachable(want_sha1, reachable, p1)
306 mask = is_reachable(want_sha1, reachable, p1)
307 if i1 != nullrev and mask > 0:
307 if i1 != nullrev and mask > 0:
308 ui.write("%s:%s " % (h1, mask)),
308 ui.write("%s:%s " % (h1, mask)),
309 mask = is_reachable(want_sha1, reachable, p2)
309 mask = is_reachable(want_sha1, reachable, p2)
310 if i2 != nullrev and mask > 0:
310 if i2 != nullrev and mask > 0:
311 ui.write("%s:%s " % (h2, mask))
311 ui.write("%s:%s " % (h2, mask))
312 ui.write("\n")
312 ui.write("\n")
313 if maxnr and count >= maxnr:
313 if maxnr and count >= maxnr:
314 break
314 break
315 count += 1
315 count += 1
316
316
317 # git rev-list tries to order things by date, and has the ability to stop
317 # git rev-list tries to order things by date, and has the ability to stop
318 # at a given commit without walking the whole repo. TODO add the stop
318 # at a given commit without walking the whole repo. TODO add the stop
319 # parameter
319 # parameter
320 @command('debug-rev-list',
320 @command('debug-rev-list',
321 [('H', 'header', None, _('header')),
321 [('H', 'header', None, _('header')),
322 ('t', 'topo-order', None, _('topo-order')),
322 ('t', 'topo-order', None, _('topo-order')),
323 ('p', 'parents', None, _('parents')),
323 ('p', 'parents', None, _('parents')),
324 ('n', 'max-count', 0, _('max-count'))],
324 ('n', 'max-count', 0, _('max-count'))],
325 ('[OPTION]... REV...'))
325 ('[OPTION]... REV...'))
326 def revlist(ui, repo, *revs, **opts):
326 def revlist(ui, repo, *revs, **opts):
327 """print revisions"""
327 """print revisions"""
328 if opts['header']:
328 if opts['header']:
329 full = "commit"
329 full = "commit"
330 else:
330 else:
331 full = None
331 full = None
332 copy = [x for x in revs]
332 copy = [x for x in revs]
333 revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
333 revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
334
334
335 @command('view',
335 @command('view',
336 [('l', 'limit', '',
336 [('l', 'limit', '',
337 _('limit number of changes displayed'), _('NUM'))],
337 _('limit number of changes displayed'), _('NUM'))],
338 _('[-l LIMIT] [REVRANGE]'))
338 _('[-l LIMIT] [REVRANGE]'))
339 def view(ui, repo, *etc, **opts):
339 def view(ui, repo, *etc, **opts):
340 "start interactive history viewer"
340 "start interactive history viewer"
341 os.chdir(repo.root)
341 os.chdir(repo.root)
342 optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
342 optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
343 if repo.filtername is None:
343 if repo.filtername is None:
344 optstr += '--hidden'
344 optstr += '--hidden'
345
345
346 cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
346 cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
347 ui.debug("running %s\n" % cmd)
347 ui.debug("running %s\n" % cmd)
348 ui.system(cmd)
348 ui.system(cmd)
@@ -1,97 +1,97 b''
1 # highlight - syntax highlighting in hgweb, based on Pygments
1 # highlight - syntax highlighting in hgweb, based on Pygments
2 #
2 #
3 # Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
3 # Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # The original module was split in an interface and an implementation
8 # The original module was split in an interface and an implementation
9 # file to defer pygments loading and speedup extension setup.
9 # file to defer pygments loading and speedup extension setup.
10
10
11 """syntax highlighting for hgweb (requires Pygments)
11 """syntax highlighting for hgweb (requires Pygments)
12
12
13 It depends on the Pygments syntax highlighting library:
13 It depends on the Pygments syntax highlighting library:
14 http://pygments.org/
14 http://pygments.org/
15
15
16 There are the following configuration options::
16 There are the following configuration options::
17
17
18 [web]
18 [web]
19 pygments_style = <style> (default: colorful)
19 pygments_style = <style> (default: colorful)
20 highlightfiles = <fileset> (default: size('<5M'))
20 highlightfiles = <fileset> (default: size('<5M'))
21 highlightonlymatchfilename = <bool> (default False)
21 highlightonlymatchfilename = <bool> (default False)
22
22
23 ``highlightonlymatchfilename`` will only highlight files if their type could
23 ``highlightonlymatchfilename`` will only highlight files if their type could
24 be identified by their filename. When this is not enabled (the default),
24 be identified by their filename. When this is not enabled (the default),
25 Pygments will try very hard to identify the file type from content and any
25 Pygments will try very hard to identify the file type from content and any
26 match (even matches with a low confidence score) will be used.
26 match (even matches with a low confidence score) will be used.
27 """
27 """
28
28
29 from __future__ import absolute_import
29 from __future__ import absolute_import
30
30
31 from . import highlight
31 from . import highlight
32 from mercurial.hgweb import (
32 from mercurial.hgweb import (
33 common,
33 common,
34 webcommands,
34 webcommands,
35 webutil,
35 webutil,
36 )
36 )
37
37
38 from mercurial import (
38 from mercurial import (
39 encoding,
39 encoding,
40 extensions,
40 extensions,
41 fileset,
41 fileset,
42 )
42 )
43
43
44 # Note for extension authors: ONLY specify testedwith = 'internal' for
44 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # be specifying the version(s) of Mercurial they are tested with, or
46 # be specifying the version(s) of Mercurial they are tested with, or
47 # leave the attribute unspecified.
47 # leave the attribute unspecified.
48 testedwith = 'internal'
48 testedwith = 'ships-with-hg-core'
49
49
50 def pygmentize(web, field, fctx, tmpl):
50 def pygmentize(web, field, fctx, tmpl):
51 style = web.config('web', 'pygments_style', 'colorful')
51 style = web.config('web', 'pygments_style', 'colorful')
52 expr = web.config('web', 'highlightfiles', "size('<5M')")
52 expr = web.config('web', 'highlightfiles', "size('<5M')")
53 filenameonly = web.configbool('web', 'highlightonlymatchfilename', False)
53 filenameonly = web.configbool('web', 'highlightonlymatchfilename', False)
54
54
55 ctx = fctx.changectx()
55 ctx = fctx.changectx()
56 tree = fileset.parse(expr)
56 tree = fileset.parse(expr)
57 mctx = fileset.matchctx(ctx, subset=[fctx.path()], status=None)
57 mctx = fileset.matchctx(ctx, subset=[fctx.path()], status=None)
58 if fctx.path() in fileset.getset(mctx, tree):
58 if fctx.path() in fileset.getset(mctx, tree):
59 highlight.pygmentize(field, fctx, style, tmpl,
59 highlight.pygmentize(field, fctx, style, tmpl,
60 guessfilenameonly=filenameonly)
60 guessfilenameonly=filenameonly)
61
61
62 def filerevision_highlight(orig, web, req, tmpl, fctx):
62 def filerevision_highlight(orig, web, req, tmpl, fctx):
63 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
63 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
64 # only pygmentize for mimetype containing 'html' so we both match
64 # only pygmentize for mimetype containing 'html' so we both match
65 # 'text/html' and possibly 'application/xhtml+xml' in the future
65 # 'text/html' and possibly 'application/xhtml+xml' in the future
66 # so that we don't have to touch the extension when the mimetype
66 # so that we don't have to touch the extension when the mimetype
67 # for a template changes; also hgweb optimizes the case that a
67 # for a template changes; also hgweb optimizes the case that a
68 # raw file is sent using rawfile() and doesn't call us, so we
68 # raw file is sent using rawfile() and doesn't call us, so we
69 # can't clash with the file's content-type here in case we
69 # can't clash with the file's content-type here in case we
70 # pygmentize a html file
70 # pygmentize a html file
71 if 'html' in mt:
71 if 'html' in mt:
72 pygmentize(web, 'fileline', fctx, tmpl)
72 pygmentize(web, 'fileline', fctx, tmpl)
73
73
74 return orig(web, req, tmpl, fctx)
74 return orig(web, req, tmpl, fctx)
75
75
76 def annotate_highlight(orig, web, req, tmpl):
76 def annotate_highlight(orig, web, req, tmpl):
77 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
77 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
78 if 'html' in mt:
78 if 'html' in mt:
79 fctx = webutil.filectx(web.repo, req)
79 fctx = webutil.filectx(web.repo, req)
80 pygmentize(web, 'annotateline', fctx, tmpl)
80 pygmentize(web, 'annotateline', fctx, tmpl)
81
81
82 return orig(web, req, tmpl)
82 return orig(web, req, tmpl)
83
83
84 def generate_css(web, req, tmpl):
84 def generate_css(web, req, tmpl):
85 pg_style = web.config('web', 'pygments_style', 'colorful')
85 pg_style = web.config('web', 'pygments_style', 'colorful')
86 fmter = highlight.HtmlFormatter(style=pg_style)
86 fmter = highlight.HtmlFormatter(style=pg_style)
87 req.respond(common.HTTP_OK, 'text/css')
87 req.respond(common.HTTP_OK, 'text/css')
88 return ['/* pygments_style = %s */\n\n' % pg_style,
88 return ['/* pygments_style = %s */\n\n' % pg_style,
89 fmter.get_style_defs('')]
89 fmter.get_style_defs('')]
90
90
91 def extsetup():
91 def extsetup():
92 # monkeypatch in the new version
92 # monkeypatch in the new version
93 extensions.wrapfunction(webcommands, '_filerevision',
93 extensions.wrapfunction(webcommands, '_filerevision',
94 filerevision_highlight)
94 filerevision_highlight)
95 extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
95 extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
96 webcommands.highlightcss = generate_css
96 webcommands.highlightcss = generate_css
97 webcommands.__all__.append('highlightcss')
97 webcommands.__all__.append('highlightcss')
@@ -1,1632 +1,1632 b''
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between c561b4e977df and 7c2fd3b9020c
31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 #
32 #
33 # Commits are listed from least to most recent
33 # Commits are listed from least to most recent
34 #
34 #
35 # Commands:
35 # Commands:
36 # p, pick = use commit
36 # p, pick = use commit
37 # e, edit = use commit, but stop for amending
37 # e, edit = use commit, but stop for amending
38 # f, fold = use commit, but combine it with the one above
38 # f, fold = use commit, but combine it with the one above
39 # r, roll = like fold, but discard this commit's description
39 # r, roll = like fold, but discard this commit's description
40 # d, drop = remove commit from history
40 # d, drop = remove commit from history
41 # m, mess = edit commit message without changing commit content
41 # m, mess = edit commit message without changing commit content
42 #
42 #
43
43
44 In this file, lines beginning with ``#`` are ignored. You must specify a rule
44 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 for each revision in your history. For example, if you had meant to add gamma
45 for each revision in your history. For example, if you had meant to add gamma
46 before beta, and then wanted to add delta in the same revision as beta, you
46 before beta, and then wanted to add delta in the same revision as beta, you
47 would reorganize the file to look like this::
47 would reorganize the file to look like this::
48
48
49 pick 030b686bedc4 Add gamma
49 pick 030b686bedc4 Add gamma
50 pick c561b4e977df Add beta
50 pick c561b4e977df Add beta
51 fold 7c2fd3b9020c Add delta
51 fold 7c2fd3b9020c Add delta
52
52
53 # Edit history between c561b4e977df and 7c2fd3b9020c
53 # Edit history between c561b4e977df and 7c2fd3b9020c
54 #
54 #
55 # Commits are listed from least to most recent
55 # Commits are listed from least to most recent
56 #
56 #
57 # Commands:
57 # Commands:
58 # p, pick = use commit
58 # p, pick = use commit
59 # e, edit = use commit, but stop for amending
59 # e, edit = use commit, but stop for amending
60 # f, fold = use commit, but combine it with the one above
60 # f, fold = use commit, but combine it with the one above
61 # r, roll = like fold, but discard this commit's description
61 # r, roll = like fold, but discard this commit's description
62 # d, drop = remove commit from history
62 # d, drop = remove commit from history
63 # m, mess = edit commit message without changing commit content
63 # m, mess = edit commit message without changing commit content
64 #
64 #
65
65
66 At which point you close the editor and ``histedit`` starts working. When you
66 At which point you close the editor and ``histedit`` starts working. When you
67 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
67 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
68 those revisions together, offering you a chance to clean up the commit message::
68 those revisions together, offering you a chance to clean up the commit message::
69
69
70 Add beta
70 Add beta
71 ***
71 ***
72 Add delta
72 Add delta
73
73
74 Edit the commit message to your liking, then close the editor. For
74 Edit the commit message to your liking, then close the editor. For
75 this example, let's assume that the commit message was changed to
75 this example, let's assume that the commit message was changed to
76 ``Add beta and delta.`` After histedit has run and had a chance to
76 ``Add beta and delta.`` After histedit has run and had a chance to
77 remove any old or temporary revisions it needed, the history looks
77 remove any old or temporary revisions it needed, the history looks
78 like this::
78 like this::
79
79
80 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
80 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
81 | Add beta and delta.
81 | Add beta and delta.
82 |
82 |
83 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
83 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
84 | Add gamma
84 | Add gamma
85 |
85 |
86 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
86 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
87 Add alpha
87 Add alpha
88
88
89 Note that ``histedit`` does *not* remove any revisions (even its own temporary
89 Note that ``histedit`` does *not* remove any revisions (even its own temporary
90 ones) until after it has completed all the editing operations, so it will
90 ones) until after it has completed all the editing operations, so it will
91 probably perform several strip operations when it's done. For the above example,
91 probably perform several strip operations when it's done. For the above example,
92 it had to run strip twice. Strip can be slow depending on a variety of factors,
92 it had to run strip twice. Strip can be slow depending on a variety of factors,
93 so you might need to be a little patient. You can choose to keep the original
93 so you might need to be a little patient. You can choose to keep the original
94 revisions by passing the ``--keep`` flag.
94 revisions by passing the ``--keep`` flag.
95
95
96 The ``edit`` operation will drop you back to a command prompt,
96 The ``edit`` operation will drop you back to a command prompt,
97 allowing you to edit files freely, or even use ``hg record`` to commit
97 allowing you to edit files freely, or even use ``hg record`` to commit
98 some changes as a separate commit. When you're done, any remaining
98 some changes as a separate commit. When you're done, any remaining
99 uncommitted changes will be committed as well. When done, run ``hg
99 uncommitted changes will be committed as well. When done, run ``hg
100 histedit --continue`` to finish this step. You'll be prompted for a
100 histedit --continue`` to finish this step. You'll be prompted for a
101 new commit message, but the default commit message will be the
101 new commit message, but the default commit message will be the
102 original message for the ``edit`` ed revision.
102 original message for the ``edit`` ed revision.
103
103
104 The ``message`` operation will give you a chance to revise a commit
104 The ``message`` operation will give you a chance to revise a commit
105 message without changing the contents. It's a shortcut for doing
105 message without changing the contents. It's a shortcut for doing
106 ``edit`` immediately followed by `hg histedit --continue``.
106 ``edit`` immediately followed by `hg histedit --continue``.
107
107
108 If ``histedit`` encounters a conflict when moving a revision (while
108 If ``histedit`` encounters a conflict when moving a revision (while
109 handling ``pick`` or ``fold``), it'll stop in a similar manner to
109 handling ``pick`` or ``fold``), it'll stop in a similar manner to
110 ``edit`` with the difference that it won't prompt you for a commit
110 ``edit`` with the difference that it won't prompt you for a commit
111 message when done. If you decide at this point that you don't like how
111 message when done. If you decide at this point that you don't like how
112 much work it will be to rearrange history, or that you made a mistake,
112 much work it will be to rearrange history, or that you made a mistake,
113 you can use ``hg histedit --abort`` to abandon the new changes you
113 you can use ``hg histedit --abort`` to abandon the new changes you
114 have made and return to the state before you attempted to edit your
114 have made and return to the state before you attempted to edit your
115 history.
115 history.
116
116
117 If we clone the histedit-ed example repository above and add four more
117 If we clone the histedit-ed example repository above and add four more
118 changes, such that we have the following history::
118 changes, such that we have the following history::
119
119
120 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
120 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
121 | Add theta
121 | Add theta
122 |
122 |
123 o 5 140988835471 2009-04-27 18:04 -0500 stefan
123 o 5 140988835471 2009-04-27 18:04 -0500 stefan
124 | Add eta
124 | Add eta
125 |
125 |
126 o 4 122930637314 2009-04-27 18:04 -0500 stefan
126 o 4 122930637314 2009-04-27 18:04 -0500 stefan
127 | Add zeta
127 | Add zeta
128 |
128 |
129 o 3 836302820282 2009-04-27 18:04 -0500 stefan
129 o 3 836302820282 2009-04-27 18:04 -0500 stefan
130 | Add epsilon
130 | Add epsilon
131 |
131 |
132 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
132 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
133 | Add beta and delta.
133 | Add beta and delta.
134 |
134 |
135 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
135 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
136 | Add gamma
136 | Add gamma
137 |
137 |
138 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
138 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
139 Add alpha
139 Add alpha
140
140
141 If you run ``hg histedit --outgoing`` on the clone then it is the same
141 If you run ``hg histedit --outgoing`` on the clone then it is the same
142 as running ``hg histedit 836302820282``. If you need plan to push to a
142 as running ``hg histedit 836302820282``. If you need plan to push to a
143 repository that Mercurial does not detect to be related to the source
143 repository that Mercurial does not detect to be related to the source
144 repo, you can add a ``--force`` option.
144 repo, you can add a ``--force`` option.
145
145
146 Config
146 Config
147 ------
147 ------
148
148
149 Histedit rule lines are truncated to 80 characters by default. You
149 Histedit rule lines are truncated to 80 characters by default. You
150 can customize this behavior by setting a different length in your
150 can customize this behavior by setting a different length in your
151 configuration file::
151 configuration file::
152
152
153 [histedit]
153 [histedit]
154 linelen = 120 # truncate rule lines at 120 characters
154 linelen = 120 # truncate rule lines at 120 characters
155
155
156 ``hg histedit`` attempts to automatically choose an appropriate base
156 ``hg histedit`` attempts to automatically choose an appropriate base
157 revision to use. To change which base revision is used, define a
157 revision to use. To change which base revision is used, define a
158 revset in your configuration file::
158 revset in your configuration file::
159
159
160 [histedit]
160 [histedit]
161 defaultrev = only(.) & draft()
161 defaultrev = only(.) & draft()
162
162
163 By default each edited revision needs to be present in histedit commands.
163 By default each edited revision needs to be present in histedit commands.
164 To remove revision you need to use ``drop`` operation. You can configure
164 To remove revision you need to use ``drop`` operation. You can configure
165 the drop to be implicit for missing commits by adding::
165 the drop to be implicit for missing commits by adding::
166
166
167 [histedit]
167 [histedit]
168 dropmissing = True
168 dropmissing = True
169
169
170 """
170 """
171
171
172 from __future__ import absolute_import
172 from __future__ import absolute_import
173
173
174 import errno
174 import errno
175 import os
175 import os
176 import sys
176 import sys
177
177
178 from mercurial.i18n import _
178 from mercurial.i18n import _
179 from mercurial import (
179 from mercurial import (
180 bundle2,
180 bundle2,
181 cmdutil,
181 cmdutil,
182 context,
182 context,
183 copies,
183 copies,
184 destutil,
184 destutil,
185 discovery,
185 discovery,
186 error,
186 error,
187 exchange,
187 exchange,
188 extensions,
188 extensions,
189 hg,
189 hg,
190 lock,
190 lock,
191 merge as mergemod,
191 merge as mergemod,
192 node,
192 node,
193 obsolete,
193 obsolete,
194 repair,
194 repair,
195 scmutil,
195 scmutil,
196 util,
196 util,
197 )
197 )
198
198
199 pickle = util.pickle
199 pickle = util.pickle
200 release = lock.release
200 release = lock.release
201 cmdtable = {}
201 cmdtable = {}
202 command = cmdutil.command(cmdtable)
202 command = cmdutil.command(cmdtable)
203
203
204 class _constraints(object):
204 class _constraints(object):
205 # aborts if there are multiple rules for one node
205 # aborts if there are multiple rules for one node
206 noduplicates = 'noduplicates'
206 noduplicates = 'noduplicates'
207 # abort if the node does belong to edited stack
207 # abort if the node does belong to edited stack
208 forceother = 'forceother'
208 forceother = 'forceother'
209 # abort if the node doesn't belong to edited stack
209 # abort if the node doesn't belong to edited stack
210 noother = 'noother'
210 noother = 'noother'
211
211
212 @classmethod
212 @classmethod
213 def known(cls):
213 def known(cls):
214 return set([v for k, v in cls.__dict__.items() if k[0] != '_'])
214 return set([v for k, v in cls.__dict__.items() if k[0] != '_'])
215
215
216 # Note for extension authors: ONLY specify testedwith = 'internal' for
216 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
217 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
217 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
218 # be specifying the version(s) of Mercurial they are tested with, or
218 # be specifying the version(s) of Mercurial they are tested with, or
219 # leave the attribute unspecified.
219 # leave the attribute unspecified.
220 testedwith = 'internal'
220 testedwith = 'ships-with-hg-core'
221
221
222 actiontable = {}
222 actiontable = {}
223 primaryactions = set()
223 primaryactions = set()
224 secondaryactions = set()
224 secondaryactions = set()
225 tertiaryactions = set()
225 tertiaryactions = set()
226 internalactions = set()
226 internalactions = set()
227
227
228 def geteditcomment(ui, first, last):
228 def geteditcomment(ui, first, last):
229 """ construct the editor comment
229 """ construct the editor comment
230 The comment includes::
230 The comment includes::
231 - an intro
231 - an intro
232 - sorted primary commands
232 - sorted primary commands
233 - sorted short commands
233 - sorted short commands
234 - sorted long commands
234 - sorted long commands
235 - additional hints
235 - additional hints
236
236
237 Commands are only included once.
237 Commands are only included once.
238 """
238 """
239 intro = _("""Edit history between %s and %s
239 intro = _("""Edit history between %s and %s
240
240
241 Commits are listed from least to most recent
241 Commits are listed from least to most recent
242
242
243 You can reorder changesets by reordering the lines
243 You can reorder changesets by reordering the lines
244
244
245 Commands:
245 Commands:
246 """)
246 """)
247 actions = []
247 actions = []
248 def addverb(v):
248 def addverb(v):
249 a = actiontable[v]
249 a = actiontable[v]
250 lines = a.message.split("\n")
250 lines = a.message.split("\n")
251 if len(a.verbs):
251 if len(a.verbs):
252 v = ', '.join(sorted(a.verbs, key=lambda v: len(v)))
252 v = ', '.join(sorted(a.verbs, key=lambda v: len(v)))
253 actions.append(" %s = %s" % (v, lines[0]))
253 actions.append(" %s = %s" % (v, lines[0]))
254 actions.extend([' %s' for l in lines[1:]])
254 actions.extend([' %s' for l in lines[1:]])
255
255
256 for v in (
256 for v in (
257 sorted(primaryactions) +
257 sorted(primaryactions) +
258 sorted(secondaryactions) +
258 sorted(secondaryactions) +
259 sorted(tertiaryactions)
259 sorted(tertiaryactions)
260 ):
260 ):
261 addverb(v)
261 addverb(v)
262 actions.append('')
262 actions.append('')
263
263
264 hints = []
264 hints = []
265 if ui.configbool('histedit', 'dropmissing'):
265 if ui.configbool('histedit', 'dropmissing'):
266 hints.append("Deleting a changeset from the list "
266 hints.append("Deleting a changeset from the list "
267 "will DISCARD it from the edited history!")
267 "will DISCARD it from the edited history!")
268
268
269 lines = (intro % (first, last)).split('\n') + actions + hints
269 lines = (intro % (first, last)).split('\n') + actions + hints
270
270
271 return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
271 return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
272
272
273 class histeditstate(object):
273 class histeditstate(object):
274 def __init__(self, repo, parentctxnode=None, actions=None, keep=None,
274 def __init__(self, repo, parentctxnode=None, actions=None, keep=None,
275 topmost=None, replacements=None, lock=None, wlock=None):
275 topmost=None, replacements=None, lock=None, wlock=None):
276 self.repo = repo
276 self.repo = repo
277 self.actions = actions
277 self.actions = actions
278 self.keep = keep
278 self.keep = keep
279 self.topmost = topmost
279 self.topmost = topmost
280 self.parentctxnode = parentctxnode
280 self.parentctxnode = parentctxnode
281 self.lock = lock
281 self.lock = lock
282 self.wlock = wlock
282 self.wlock = wlock
283 self.backupfile = None
283 self.backupfile = None
284 if replacements is None:
284 if replacements is None:
285 self.replacements = []
285 self.replacements = []
286 else:
286 else:
287 self.replacements = replacements
287 self.replacements = replacements
288
288
289 def read(self):
289 def read(self):
290 """Load histedit state from disk and set fields appropriately."""
290 """Load histedit state from disk and set fields appropriately."""
291 try:
291 try:
292 state = self.repo.vfs.read('histedit-state')
292 state = self.repo.vfs.read('histedit-state')
293 except IOError as err:
293 except IOError as err:
294 if err.errno != errno.ENOENT:
294 if err.errno != errno.ENOENT:
295 raise
295 raise
296 cmdutil.wrongtooltocontinue(self.repo, _('histedit'))
296 cmdutil.wrongtooltocontinue(self.repo, _('histedit'))
297
297
298 if state.startswith('v1\n'):
298 if state.startswith('v1\n'):
299 data = self._load()
299 data = self._load()
300 parentctxnode, rules, keep, topmost, replacements, backupfile = data
300 parentctxnode, rules, keep, topmost, replacements, backupfile = data
301 else:
301 else:
302 data = pickle.loads(state)
302 data = pickle.loads(state)
303 parentctxnode, rules, keep, topmost, replacements = data
303 parentctxnode, rules, keep, topmost, replacements = data
304 backupfile = None
304 backupfile = None
305
305
306 self.parentctxnode = parentctxnode
306 self.parentctxnode = parentctxnode
307 rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules])
307 rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules])
308 actions = parserules(rules, self)
308 actions = parserules(rules, self)
309 self.actions = actions
309 self.actions = actions
310 self.keep = keep
310 self.keep = keep
311 self.topmost = topmost
311 self.topmost = topmost
312 self.replacements = replacements
312 self.replacements = replacements
313 self.backupfile = backupfile
313 self.backupfile = backupfile
314
314
315 def write(self):
315 def write(self):
316 fp = self.repo.vfs('histedit-state', 'w')
316 fp = self.repo.vfs('histedit-state', 'w')
317 fp.write('v1\n')
317 fp.write('v1\n')
318 fp.write('%s\n' % node.hex(self.parentctxnode))
318 fp.write('%s\n' % node.hex(self.parentctxnode))
319 fp.write('%s\n' % node.hex(self.topmost))
319 fp.write('%s\n' % node.hex(self.topmost))
320 fp.write('%s\n' % self.keep)
320 fp.write('%s\n' % self.keep)
321 fp.write('%d\n' % len(self.actions))
321 fp.write('%d\n' % len(self.actions))
322 for action in self.actions:
322 for action in self.actions:
323 fp.write('%s\n' % action.tostate())
323 fp.write('%s\n' % action.tostate())
324 fp.write('%d\n' % len(self.replacements))
324 fp.write('%d\n' % len(self.replacements))
325 for replacement in self.replacements:
325 for replacement in self.replacements:
326 fp.write('%s%s\n' % (node.hex(replacement[0]), ''.join(node.hex(r)
326 fp.write('%s%s\n' % (node.hex(replacement[0]), ''.join(node.hex(r)
327 for r in replacement[1])))
327 for r in replacement[1])))
328 backupfile = self.backupfile
328 backupfile = self.backupfile
329 if not backupfile:
329 if not backupfile:
330 backupfile = ''
330 backupfile = ''
331 fp.write('%s\n' % backupfile)
331 fp.write('%s\n' % backupfile)
332 fp.close()
332 fp.close()
333
333
334 def _load(self):
334 def _load(self):
335 fp = self.repo.vfs('histedit-state', 'r')
335 fp = self.repo.vfs('histedit-state', 'r')
336 lines = [l[:-1] for l in fp.readlines()]
336 lines = [l[:-1] for l in fp.readlines()]
337
337
338 index = 0
338 index = 0
339 lines[index] # version number
339 lines[index] # version number
340 index += 1
340 index += 1
341
341
342 parentctxnode = node.bin(lines[index])
342 parentctxnode = node.bin(lines[index])
343 index += 1
343 index += 1
344
344
345 topmost = node.bin(lines[index])
345 topmost = node.bin(lines[index])
346 index += 1
346 index += 1
347
347
348 keep = lines[index] == 'True'
348 keep = lines[index] == 'True'
349 index += 1
349 index += 1
350
350
351 # Rules
351 # Rules
352 rules = []
352 rules = []
353 rulelen = int(lines[index])
353 rulelen = int(lines[index])
354 index += 1
354 index += 1
355 for i in xrange(rulelen):
355 for i in xrange(rulelen):
356 ruleaction = lines[index]
356 ruleaction = lines[index]
357 index += 1
357 index += 1
358 rule = lines[index]
358 rule = lines[index]
359 index += 1
359 index += 1
360 rules.append((ruleaction, rule))
360 rules.append((ruleaction, rule))
361
361
362 # Replacements
362 # Replacements
363 replacements = []
363 replacements = []
364 replacementlen = int(lines[index])
364 replacementlen = int(lines[index])
365 index += 1
365 index += 1
366 for i in xrange(replacementlen):
366 for i in xrange(replacementlen):
367 replacement = lines[index]
367 replacement = lines[index]
368 original = node.bin(replacement[:40])
368 original = node.bin(replacement[:40])
369 succ = [node.bin(replacement[i:i + 40]) for i in
369 succ = [node.bin(replacement[i:i + 40]) for i in
370 range(40, len(replacement), 40)]
370 range(40, len(replacement), 40)]
371 replacements.append((original, succ))
371 replacements.append((original, succ))
372 index += 1
372 index += 1
373
373
374 backupfile = lines[index]
374 backupfile = lines[index]
375 index += 1
375 index += 1
376
376
377 fp.close()
377 fp.close()
378
378
379 return parentctxnode, rules, keep, topmost, replacements, backupfile
379 return parentctxnode, rules, keep, topmost, replacements, backupfile
380
380
381 def clear(self):
381 def clear(self):
382 if self.inprogress():
382 if self.inprogress():
383 self.repo.vfs.unlink('histedit-state')
383 self.repo.vfs.unlink('histedit-state')
384
384
385 def inprogress(self):
385 def inprogress(self):
386 return self.repo.vfs.exists('histedit-state')
386 return self.repo.vfs.exists('histedit-state')
387
387
388
388
389 class histeditaction(object):
389 class histeditaction(object):
390 def __init__(self, state, node):
390 def __init__(self, state, node):
391 self.state = state
391 self.state = state
392 self.repo = state.repo
392 self.repo = state.repo
393 self.node = node
393 self.node = node
394
394
395 @classmethod
395 @classmethod
396 def fromrule(cls, state, rule):
396 def fromrule(cls, state, rule):
397 """Parses the given rule, returning an instance of the histeditaction.
397 """Parses the given rule, returning an instance of the histeditaction.
398 """
398 """
399 rulehash = rule.strip().split(' ', 1)[0]
399 rulehash = rule.strip().split(' ', 1)[0]
400 try:
400 try:
401 rev = node.bin(rulehash)
401 rev = node.bin(rulehash)
402 except TypeError:
402 except TypeError:
403 raise error.ParseError("invalid changeset %s" % rulehash)
403 raise error.ParseError("invalid changeset %s" % rulehash)
404 return cls(state, rev)
404 return cls(state, rev)
405
405
406 def verify(self, prev):
406 def verify(self, prev):
407 """ Verifies semantic correctness of the rule"""
407 """ Verifies semantic correctness of the rule"""
408 repo = self.repo
408 repo = self.repo
409 ha = node.hex(self.node)
409 ha = node.hex(self.node)
410 try:
410 try:
411 self.node = repo[ha].node()
411 self.node = repo[ha].node()
412 except error.RepoError:
412 except error.RepoError:
413 raise error.ParseError(_('unknown changeset %s listed')
413 raise error.ParseError(_('unknown changeset %s listed')
414 % ha[:12])
414 % ha[:12])
415
415
416 def torule(self):
416 def torule(self):
417 """build a histedit rule line for an action
417 """build a histedit rule line for an action
418
418
419 by default lines are in the form:
419 by default lines are in the form:
420 <hash> <rev> <summary>
420 <hash> <rev> <summary>
421 """
421 """
422 ctx = self.repo[self.node]
422 ctx = self.repo[self.node]
423 summary = _getsummary(ctx)
423 summary = _getsummary(ctx)
424 line = '%s %s %d %s' % (self.verb, ctx, ctx.rev(), summary)
424 line = '%s %s %d %s' % (self.verb, ctx, ctx.rev(), summary)
425 # trim to 75 columns by default so it's not stupidly wide in my editor
425 # trim to 75 columns by default so it's not stupidly wide in my editor
426 # (the 5 more are left for verb)
426 # (the 5 more are left for verb)
427 maxlen = self.repo.ui.configint('histedit', 'linelen', default=80)
427 maxlen = self.repo.ui.configint('histedit', 'linelen', default=80)
428 maxlen = max(maxlen, 22) # avoid truncating hash
428 maxlen = max(maxlen, 22) # avoid truncating hash
429 return util.ellipsis(line, maxlen)
429 return util.ellipsis(line, maxlen)
430
430
431 def tostate(self):
431 def tostate(self):
432 """Print an action in format used by histedit state files
432 """Print an action in format used by histedit state files
433 (the first line is a verb, the remainder is the second)
433 (the first line is a verb, the remainder is the second)
434 """
434 """
435 return "%s\n%s" % (self.verb, node.hex(self.node))
435 return "%s\n%s" % (self.verb, node.hex(self.node))
436
436
437 def constraints(self):
437 def constraints(self):
438 """Return a set of constrains that this action should be verified for
438 """Return a set of constrains that this action should be verified for
439 """
439 """
440 return set([_constraints.noduplicates, _constraints.noother])
440 return set([_constraints.noduplicates, _constraints.noother])
441
441
442 def nodetoverify(self):
442 def nodetoverify(self):
443 """Returns a node associated with the action that will be used for
443 """Returns a node associated with the action that will be used for
444 verification purposes.
444 verification purposes.
445
445
446 If the action doesn't correspond to node it should return None
446 If the action doesn't correspond to node it should return None
447 """
447 """
448 return self.node
448 return self.node
449
449
450 def run(self):
450 def run(self):
451 """Runs the action. The default behavior is simply apply the action's
451 """Runs the action. The default behavior is simply apply the action's
452 rulectx onto the current parentctx."""
452 rulectx onto the current parentctx."""
453 self.applychange()
453 self.applychange()
454 self.continuedirty()
454 self.continuedirty()
455 return self.continueclean()
455 return self.continueclean()
456
456
457 def applychange(self):
457 def applychange(self):
458 """Applies the changes from this action's rulectx onto the current
458 """Applies the changes from this action's rulectx onto the current
459 parentctx, but does not commit them."""
459 parentctx, but does not commit them."""
460 repo = self.repo
460 repo = self.repo
461 rulectx = repo[self.node]
461 rulectx = repo[self.node]
462 repo.ui.pushbuffer(error=True, labeled=True)
462 repo.ui.pushbuffer(error=True, labeled=True)
463 hg.update(repo, self.state.parentctxnode, quietempty=True)
463 hg.update(repo, self.state.parentctxnode, quietempty=True)
464 stats = applychanges(repo.ui, repo, rulectx, {})
464 stats = applychanges(repo.ui, repo, rulectx, {})
465 if stats and stats[3] > 0:
465 if stats and stats[3] > 0:
466 buf = repo.ui.popbuffer()
466 buf = repo.ui.popbuffer()
467 repo.ui.write(*buf)
467 repo.ui.write(*buf)
468 raise error.InterventionRequired(
468 raise error.InterventionRequired(
469 _('Fix up the change (%s %s)') %
469 _('Fix up the change (%s %s)') %
470 (self.verb, node.short(self.node)),
470 (self.verb, node.short(self.node)),
471 hint=_('hg histedit --continue to resume'))
471 hint=_('hg histedit --continue to resume'))
472 else:
472 else:
473 repo.ui.popbuffer()
473 repo.ui.popbuffer()
474
474
475 def continuedirty(self):
475 def continuedirty(self):
476 """Continues the action when changes have been applied to the working
476 """Continues the action when changes have been applied to the working
477 copy. The default behavior is to commit the dirty changes."""
477 copy. The default behavior is to commit the dirty changes."""
478 repo = self.repo
478 repo = self.repo
479 rulectx = repo[self.node]
479 rulectx = repo[self.node]
480
480
481 editor = self.commiteditor()
481 editor = self.commiteditor()
482 commit = commitfuncfor(repo, rulectx)
482 commit = commitfuncfor(repo, rulectx)
483
483
484 commit(text=rulectx.description(), user=rulectx.user(),
484 commit(text=rulectx.description(), user=rulectx.user(),
485 date=rulectx.date(), extra=rulectx.extra(), editor=editor)
485 date=rulectx.date(), extra=rulectx.extra(), editor=editor)
486
486
487 def commiteditor(self):
487 def commiteditor(self):
488 """The editor to be used to edit the commit message."""
488 """The editor to be used to edit the commit message."""
489 return False
489 return False
490
490
491 def continueclean(self):
491 def continueclean(self):
492 """Continues the action when the working copy is clean. The default
492 """Continues the action when the working copy is clean. The default
493 behavior is to accept the current commit as the new version of the
493 behavior is to accept the current commit as the new version of the
494 rulectx."""
494 rulectx."""
495 ctx = self.repo['.']
495 ctx = self.repo['.']
496 if ctx.node() == self.state.parentctxnode:
496 if ctx.node() == self.state.parentctxnode:
497 self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') %
497 self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') %
498 node.short(self.node))
498 node.short(self.node))
499 return ctx, [(self.node, tuple())]
499 return ctx, [(self.node, tuple())]
500 if ctx.node() == self.node:
500 if ctx.node() == self.node:
501 # Nothing changed
501 # Nothing changed
502 return ctx, []
502 return ctx, []
503 return ctx, [(self.node, (ctx.node(),))]
503 return ctx, [(self.node, (ctx.node(),))]
504
504
505 def commitfuncfor(repo, src):
505 def commitfuncfor(repo, src):
506 """Build a commit function for the replacement of <src>
506 """Build a commit function for the replacement of <src>
507
507
508 This function ensure we apply the same treatment to all changesets.
508 This function ensure we apply the same treatment to all changesets.
509
509
510 - Add a 'histedit_source' entry in extra.
510 - Add a 'histedit_source' entry in extra.
511
511
512 Note that fold has its own separated logic because its handling is a bit
512 Note that fold has its own separated logic because its handling is a bit
513 different and not easily factored out of the fold method.
513 different and not easily factored out of the fold method.
514 """
514 """
515 phasemin = src.phase()
515 phasemin = src.phase()
516 def commitfunc(**kwargs):
516 def commitfunc(**kwargs):
517 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
517 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
518 try:
518 try:
519 repo.ui.setconfig('phases', 'new-commit', phasemin,
519 repo.ui.setconfig('phases', 'new-commit', phasemin,
520 'histedit')
520 'histedit')
521 extra = kwargs.get('extra', {}).copy()
521 extra = kwargs.get('extra', {}).copy()
522 extra['histedit_source'] = src.hex()
522 extra['histedit_source'] = src.hex()
523 kwargs['extra'] = extra
523 kwargs['extra'] = extra
524 return repo.commit(**kwargs)
524 return repo.commit(**kwargs)
525 finally:
525 finally:
526 repo.ui.restoreconfig(phasebackup)
526 repo.ui.restoreconfig(phasebackup)
527 return commitfunc
527 return commitfunc
528
528
529 def applychanges(ui, repo, ctx, opts):
529 def applychanges(ui, repo, ctx, opts):
530 """Merge changeset from ctx (only) in the current working directory"""
530 """Merge changeset from ctx (only) in the current working directory"""
531 wcpar = repo.dirstate.parents()[0]
531 wcpar = repo.dirstate.parents()[0]
532 if ctx.p1().node() == wcpar:
532 if ctx.p1().node() == wcpar:
533 # edits are "in place" we do not need to make any merge,
533 # edits are "in place" we do not need to make any merge,
534 # just applies changes on parent for editing
534 # just applies changes on parent for editing
535 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
535 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
536 stats = None
536 stats = None
537 else:
537 else:
538 try:
538 try:
539 # ui.forcemerge is an internal variable, do not document
539 # ui.forcemerge is an internal variable, do not document
540 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
540 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
541 'histedit')
541 'histedit')
542 stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'histedit'])
542 stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'histedit'])
543 finally:
543 finally:
544 repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
544 repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
545 return stats
545 return stats
546
546
547 def collapse(repo, first, last, commitopts, skipprompt=False):
547 def collapse(repo, first, last, commitopts, skipprompt=False):
548 """collapse the set of revisions from first to last as new one.
548 """collapse the set of revisions from first to last as new one.
549
549
550 Expected commit options are:
550 Expected commit options are:
551 - message
551 - message
552 - date
552 - date
553 - username
553 - username
554 Commit message is edited in all cases.
554 Commit message is edited in all cases.
555
555
556 This function works in memory."""
556 This function works in memory."""
557 ctxs = list(repo.set('%d::%d', first, last))
557 ctxs = list(repo.set('%d::%d', first, last))
558 if not ctxs:
558 if not ctxs:
559 return None
559 return None
560 for c in ctxs:
560 for c in ctxs:
561 if not c.mutable():
561 if not c.mutable():
562 raise error.ParseError(
562 raise error.ParseError(
563 _("cannot fold into public change %s") % node.short(c.node()))
563 _("cannot fold into public change %s") % node.short(c.node()))
564 base = first.parents()[0]
564 base = first.parents()[0]
565
565
566 # commit a new version of the old changeset, including the update
566 # commit a new version of the old changeset, including the update
567 # collect all files which might be affected
567 # collect all files which might be affected
568 files = set()
568 files = set()
569 for ctx in ctxs:
569 for ctx in ctxs:
570 files.update(ctx.files())
570 files.update(ctx.files())
571
571
572 # Recompute copies (avoid recording a -> b -> a)
572 # Recompute copies (avoid recording a -> b -> a)
573 copied = copies.pathcopies(base, last)
573 copied = copies.pathcopies(base, last)
574
574
575 # prune files which were reverted by the updates
575 # prune files which were reverted by the updates
576 files = [f for f in files if not cmdutil.samefile(f, last, base)]
576 files = [f for f in files if not cmdutil.samefile(f, last, base)]
577 # commit version of these files as defined by head
577 # commit version of these files as defined by head
578 headmf = last.manifest()
578 headmf = last.manifest()
579 def filectxfn(repo, ctx, path):
579 def filectxfn(repo, ctx, path):
580 if path in headmf:
580 if path in headmf:
581 fctx = last[path]
581 fctx = last[path]
582 flags = fctx.flags()
582 flags = fctx.flags()
583 mctx = context.memfilectx(repo,
583 mctx = context.memfilectx(repo,
584 fctx.path(), fctx.data(),
584 fctx.path(), fctx.data(),
585 islink='l' in flags,
585 islink='l' in flags,
586 isexec='x' in flags,
586 isexec='x' in flags,
587 copied=copied.get(path))
587 copied=copied.get(path))
588 return mctx
588 return mctx
589 return None
589 return None
590
590
591 if commitopts.get('message'):
591 if commitopts.get('message'):
592 message = commitopts['message']
592 message = commitopts['message']
593 else:
593 else:
594 message = first.description()
594 message = first.description()
595 user = commitopts.get('user')
595 user = commitopts.get('user')
596 date = commitopts.get('date')
596 date = commitopts.get('date')
597 extra = commitopts.get('extra')
597 extra = commitopts.get('extra')
598
598
599 parents = (first.p1().node(), first.p2().node())
599 parents = (first.p1().node(), first.p2().node())
600 editor = None
600 editor = None
601 if not skipprompt:
601 if not skipprompt:
602 editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
602 editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
603 new = context.memctx(repo,
603 new = context.memctx(repo,
604 parents=parents,
604 parents=parents,
605 text=message,
605 text=message,
606 files=files,
606 files=files,
607 filectxfn=filectxfn,
607 filectxfn=filectxfn,
608 user=user,
608 user=user,
609 date=date,
609 date=date,
610 extra=extra,
610 extra=extra,
611 editor=editor)
611 editor=editor)
612 return repo.commitctx(new)
612 return repo.commitctx(new)
613
613
614 def _isdirtywc(repo):
614 def _isdirtywc(repo):
615 return repo[None].dirty(missing=True)
615 return repo[None].dirty(missing=True)
616
616
617 def abortdirty():
617 def abortdirty():
618 raise error.Abort(_('working copy has pending changes'),
618 raise error.Abort(_('working copy has pending changes'),
619 hint=_('amend, commit, or revert them and run histedit '
619 hint=_('amend, commit, or revert them and run histedit '
620 '--continue, or abort with histedit --abort'))
620 '--continue, or abort with histedit --abort'))
621
621
622 def action(verbs, message, priority=False, internal=False):
622 def action(verbs, message, priority=False, internal=False):
623 def wrap(cls):
623 def wrap(cls):
624 assert not priority or not internal
624 assert not priority or not internal
625 verb = verbs[0]
625 verb = verbs[0]
626 if priority:
626 if priority:
627 primaryactions.add(verb)
627 primaryactions.add(verb)
628 elif internal:
628 elif internal:
629 internalactions.add(verb)
629 internalactions.add(verb)
630 elif len(verbs) > 1:
630 elif len(verbs) > 1:
631 secondaryactions.add(verb)
631 secondaryactions.add(verb)
632 else:
632 else:
633 tertiaryactions.add(verb)
633 tertiaryactions.add(verb)
634
634
635 cls.verb = verb
635 cls.verb = verb
636 cls.verbs = verbs
636 cls.verbs = verbs
637 cls.message = message
637 cls.message = message
638 for verb in verbs:
638 for verb in verbs:
639 actiontable[verb] = cls
639 actiontable[verb] = cls
640 return cls
640 return cls
641 return wrap
641 return wrap
642
642
643 @action(['pick', 'p'],
643 @action(['pick', 'p'],
644 _('use commit'),
644 _('use commit'),
645 priority=True)
645 priority=True)
646 class pick(histeditaction):
646 class pick(histeditaction):
647 def run(self):
647 def run(self):
648 rulectx = self.repo[self.node]
648 rulectx = self.repo[self.node]
649 if rulectx.parents()[0].node() == self.state.parentctxnode:
649 if rulectx.parents()[0].node() == self.state.parentctxnode:
650 self.repo.ui.debug('node %s unchanged\n' % node.short(self.node))
650 self.repo.ui.debug('node %s unchanged\n' % node.short(self.node))
651 return rulectx, []
651 return rulectx, []
652
652
653 return super(pick, self).run()
653 return super(pick, self).run()
654
654
655 @action(['edit', 'e'],
655 @action(['edit', 'e'],
656 _('use commit, but stop for amending'),
656 _('use commit, but stop for amending'),
657 priority=True)
657 priority=True)
658 class edit(histeditaction):
658 class edit(histeditaction):
659 def run(self):
659 def run(self):
660 repo = self.repo
660 repo = self.repo
661 rulectx = repo[self.node]
661 rulectx = repo[self.node]
662 hg.update(repo, self.state.parentctxnode, quietempty=True)
662 hg.update(repo, self.state.parentctxnode, quietempty=True)
663 applychanges(repo.ui, repo, rulectx, {})
663 applychanges(repo.ui, repo, rulectx, {})
664 raise error.InterventionRequired(
664 raise error.InterventionRequired(
665 _('Editing (%s), you may commit or record as needed now.')
665 _('Editing (%s), you may commit or record as needed now.')
666 % node.short(self.node),
666 % node.short(self.node),
667 hint=_('hg histedit --continue to resume'))
667 hint=_('hg histedit --continue to resume'))
668
668
669 def commiteditor(self):
669 def commiteditor(self):
670 return cmdutil.getcommiteditor(edit=True, editform='histedit.edit')
670 return cmdutil.getcommiteditor(edit=True, editform='histedit.edit')
671
671
672 @action(['fold', 'f'],
672 @action(['fold', 'f'],
673 _('use commit, but combine it with the one above'))
673 _('use commit, but combine it with the one above'))
674 class fold(histeditaction):
674 class fold(histeditaction):
675 def verify(self, prev):
675 def verify(self, prev):
676 """ Verifies semantic correctness of the fold rule"""
676 """ Verifies semantic correctness of the fold rule"""
677 super(fold, self).verify(prev)
677 super(fold, self).verify(prev)
678 repo = self.repo
678 repo = self.repo
679 if not prev:
679 if not prev:
680 c = repo[self.node].parents()[0]
680 c = repo[self.node].parents()[0]
681 elif not prev.verb in ('pick', 'base'):
681 elif not prev.verb in ('pick', 'base'):
682 return
682 return
683 else:
683 else:
684 c = repo[prev.node]
684 c = repo[prev.node]
685 if not c.mutable():
685 if not c.mutable():
686 raise error.ParseError(
686 raise error.ParseError(
687 _("cannot fold into public change %s") % node.short(c.node()))
687 _("cannot fold into public change %s") % node.short(c.node()))
688
688
689
689
690 def continuedirty(self):
690 def continuedirty(self):
691 repo = self.repo
691 repo = self.repo
692 rulectx = repo[self.node]
692 rulectx = repo[self.node]
693
693
694 commit = commitfuncfor(repo, rulectx)
694 commit = commitfuncfor(repo, rulectx)
695 commit(text='fold-temp-revision %s' % node.short(self.node),
695 commit(text='fold-temp-revision %s' % node.short(self.node),
696 user=rulectx.user(), date=rulectx.date(),
696 user=rulectx.user(), date=rulectx.date(),
697 extra=rulectx.extra())
697 extra=rulectx.extra())
698
698
699 def continueclean(self):
699 def continueclean(self):
700 repo = self.repo
700 repo = self.repo
701 ctx = repo['.']
701 ctx = repo['.']
702 rulectx = repo[self.node]
702 rulectx = repo[self.node]
703 parentctxnode = self.state.parentctxnode
703 parentctxnode = self.state.parentctxnode
704 if ctx.node() == parentctxnode:
704 if ctx.node() == parentctxnode:
705 repo.ui.warn(_('%s: empty changeset\n') %
705 repo.ui.warn(_('%s: empty changeset\n') %
706 node.short(self.node))
706 node.short(self.node))
707 return ctx, [(self.node, (parentctxnode,))]
707 return ctx, [(self.node, (parentctxnode,))]
708
708
709 parentctx = repo[parentctxnode]
709 parentctx = repo[parentctxnode]
710 newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx,
710 newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx,
711 parentctx))
711 parentctx))
712 if not newcommits:
712 if not newcommits:
713 repo.ui.warn(_('%s: cannot fold - working copy is not a '
713 repo.ui.warn(_('%s: cannot fold - working copy is not a '
714 'descendant of previous commit %s\n') %
714 'descendant of previous commit %s\n') %
715 (node.short(self.node), node.short(parentctxnode)))
715 (node.short(self.node), node.short(parentctxnode)))
716 return ctx, [(self.node, (ctx.node(),))]
716 return ctx, [(self.node, (ctx.node(),))]
717
717
718 middlecommits = newcommits.copy()
718 middlecommits = newcommits.copy()
719 middlecommits.discard(ctx.node())
719 middlecommits.discard(ctx.node())
720
720
721 return self.finishfold(repo.ui, repo, parentctx, rulectx, ctx.node(),
721 return self.finishfold(repo.ui, repo, parentctx, rulectx, ctx.node(),
722 middlecommits)
722 middlecommits)
723
723
724 def skipprompt(self):
724 def skipprompt(self):
725 """Returns true if the rule should skip the message editor.
725 """Returns true if the rule should skip the message editor.
726
726
727 For example, 'fold' wants to show an editor, but 'rollup'
727 For example, 'fold' wants to show an editor, but 'rollup'
728 doesn't want to.
728 doesn't want to.
729 """
729 """
730 return False
730 return False
731
731
732 def mergedescs(self):
732 def mergedescs(self):
733 """Returns true if the rule should merge messages of multiple changes.
733 """Returns true if the rule should merge messages of multiple changes.
734
734
735 This exists mainly so that 'rollup' rules can be a subclass of
735 This exists mainly so that 'rollup' rules can be a subclass of
736 'fold'.
736 'fold'.
737 """
737 """
738 return True
738 return True
739
739
740 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
740 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
741 parent = ctx.parents()[0].node()
741 parent = ctx.parents()[0].node()
742 repo.ui.pushbuffer()
742 repo.ui.pushbuffer()
743 hg.update(repo, parent)
743 hg.update(repo, parent)
744 repo.ui.popbuffer()
744 repo.ui.popbuffer()
745 ### prepare new commit data
745 ### prepare new commit data
746 commitopts = {}
746 commitopts = {}
747 commitopts['user'] = ctx.user()
747 commitopts['user'] = ctx.user()
748 # commit message
748 # commit message
749 if not self.mergedescs():
749 if not self.mergedescs():
750 newmessage = ctx.description()
750 newmessage = ctx.description()
751 else:
751 else:
752 newmessage = '\n***\n'.join(
752 newmessage = '\n***\n'.join(
753 [ctx.description()] +
753 [ctx.description()] +
754 [repo[r].description() for r in internalchanges] +
754 [repo[r].description() for r in internalchanges] +
755 [oldctx.description()]) + '\n'
755 [oldctx.description()]) + '\n'
756 commitopts['message'] = newmessage
756 commitopts['message'] = newmessage
757 # date
757 # date
758 commitopts['date'] = max(ctx.date(), oldctx.date())
758 commitopts['date'] = max(ctx.date(), oldctx.date())
759 extra = ctx.extra().copy()
759 extra = ctx.extra().copy()
760 # histedit_source
760 # histedit_source
761 # note: ctx is likely a temporary commit but that the best we can do
761 # note: ctx is likely a temporary commit but that the best we can do
762 # here. This is sufficient to solve issue3681 anyway.
762 # here. This is sufficient to solve issue3681 anyway.
763 extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
763 extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
764 commitopts['extra'] = extra
764 commitopts['extra'] = extra
765 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
765 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
766 try:
766 try:
767 phasemin = max(ctx.phase(), oldctx.phase())
767 phasemin = max(ctx.phase(), oldctx.phase())
768 repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
768 repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
769 n = collapse(repo, ctx, repo[newnode], commitopts,
769 n = collapse(repo, ctx, repo[newnode], commitopts,
770 skipprompt=self.skipprompt())
770 skipprompt=self.skipprompt())
771 finally:
771 finally:
772 repo.ui.restoreconfig(phasebackup)
772 repo.ui.restoreconfig(phasebackup)
773 if n is None:
773 if n is None:
774 return ctx, []
774 return ctx, []
775 repo.ui.pushbuffer()
775 repo.ui.pushbuffer()
776 hg.update(repo, n)
776 hg.update(repo, n)
777 repo.ui.popbuffer()
777 repo.ui.popbuffer()
778 replacements = [(oldctx.node(), (newnode,)),
778 replacements = [(oldctx.node(), (newnode,)),
779 (ctx.node(), (n,)),
779 (ctx.node(), (n,)),
780 (newnode, (n,)),
780 (newnode, (n,)),
781 ]
781 ]
782 for ich in internalchanges:
782 for ich in internalchanges:
783 replacements.append((ich, (n,)))
783 replacements.append((ich, (n,)))
784 return repo[n], replacements
784 return repo[n], replacements
785
785
class base(histeditaction):
    """Action that simply checks out its revision as the new base."""

    def constraints(self):
        # base may (indeed, must) target a changeset outside the edited set
        return set([_constraints.forceother])

    def run(self):
        # only move the working copy when it is not already on the target
        if self.repo['.'].node() != self.node:
            mergemod.update(self.repo, self.node, False, True)
            # branchmerge, force)
        return self.continueclean()

    def continuedirty(self):
        abortdirty()

    def continueclean(self):
        basectx = self.repo['.']
        return basectx, []
@action(['_multifold'],
        _(
    """fold subclass used for when multiple folds happen in a row

    We only want to fire the editor for the folded message once when
    (say) four changes are folded down into a single change. This is
    similar to rollup, but we should preserve both messages so that
    when the last fold operation runs we can show the user all the
    commit messages in their editor.
    """),
        internal=True)
class _multifold(fold):
    def skipprompt(self):
        # the editor fires only on the last fold of the run
        return True
818 @action(["roll", "r"],
818 @action(["roll", "r"],
819 _("like fold, but discard this commit's description"))
819 _("like fold, but discard this commit's description"))
820 class rollup(fold):
820 class rollup(fold):
821 def mergedescs(self):
821 def mergedescs(self):
822 return False
822 return False
823
823
824 def skipprompt(self):
824 def skipprompt(self):
825 return True
825 return True
826
826
827 @action(["drop", "d"],
827 @action(["drop", "d"],
828 _('remove commit from history'))
828 _('remove commit from history'))
829 class drop(histeditaction):
829 class drop(histeditaction):
830 def run(self):
830 def run(self):
831 parentctx = self.repo[self.state.parentctxnode]
831 parentctx = self.repo[self.state.parentctxnode]
832 return parentctx, [(self.node, tuple())]
832 return parentctx, [(self.node, tuple())]
833
833
834 @action(["mess", "m"],
834 @action(["mess", "m"],
835 _('edit commit message without changing commit content'),
835 _('edit commit message without changing commit content'),
836 priority=True)
836 priority=True)
837 class message(histeditaction):
837 class message(histeditaction):
838 def commiteditor(self):
838 def commiteditor(self):
839 return cmdutil.getcommiteditor(edit=True, editform='histedit.mess')
839 return cmdutil.getcommiteditor(edit=True, editform='histedit.mess')
840
840
def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code"""
    if opts is None:
        opts = {}
    dest = ui.expandpath(remote or 'default-push', remote or 'default')
    dest, revs = hg.parseurl(dest, None)[:2]
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.Abort(_('no outgoing ancestors'))
    # roots of the outgoing set; histedit needs a single linear root
    roots = list(repo.revs("roots(%ln)", outgoing.missing))
    if len(roots) > 1:
        msg = _('there are ambiguous outgoing revisions')
        hint = _('see "hg help histedit" for more detail')
        raise error.Abort(msg, hint=hint)
    return repo.lookup(roots[0])
@command('histedit',
    [('', 'commands', '',
      _('read history edits from the specified file'), _('FILE')),
     ('c', 'continue', False, _('continue an edit already in progress')),
     ('', 'edit-plan', False, _('edit remaining actions list')),
     ('k', 'keep', False,
      _("don't strip old nodes after edit is complete")),
     ('', 'abort', False, _('abort an edit in progress')),
     ('o', 'outgoing', False, _('changesets not found in destination')),
     ('f', 'force', False,
      _('force outgoing even for unrelated repositories')),
     ('r', 'rev', [], _('first revision to be edited'), _('REV'))],
    _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"))
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command lets you edit a linear series of changesets (up to
    and including the working directory, which should be clean).
    You can:

    - `pick` to [re]order a changeset

    - `drop` to omit changeset

    - `mess` to reword the changeset commit message

    - `fold` to combine it with the preceding changeset

    - `roll` like fold, but discarding this commit's description

    - `edit` to edit this changeset

    There are a number of ways to select the root changeset:

    - Specify ANCESTOR directly

    - Use --outgoing -- it will be the first linear changeset not
      included in destination. (See :hg:`help config.paths.default-push`)

    - Otherwise, the value from the "histedit.defaultrev" config option
      is used as a revset to select the base revision when ANCESTOR is not
      specified. The first revision returned by the revset is used. By
      default, this selects the editable history that is unique to the
      ancestry of the working directory.

    .. container:: verbose

       If you use --outgoing, this command will abort if there are ambiguous
       outgoing revisions. For example, if there are multiple branches
       containing outgoing revisions.

       Use "min(outgoing() and ::.)" or similar revset specification
       instead of --outgoing to specify edit target revision exactly in
       such ambiguous situation. See :hg:`help revsets` for detail about
       selecting revisions.

    .. container:: verbose

       Examples:

         - A number of changes have been made.
           Revision 3 is no longer needed.

           Start history editing from revision 3::

             hg histedit -r 3

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

           Additional information about the possible actions
           to take appears below the list of revisions.

           To remove revision 3 from the history,
           its action (at the beginning of the relevant line)
           is changed to 'drop'::

             drop 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

         - A number of changes have been made.
           Revision 2 and 4 need to be swapped.

           Start history editing from revision 2::

             hg histedit -r 2

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 252a1af424ad 2 Blorb a morgwazzle
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog

           To swap revision 2 and 4, its lines are swapped
           in the editor::

             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 252a1af424ad 2 Blorb a morgwazzle

    Returns 0 on success, 1 if user intervention is required (not only
    for intentional "edit" command, but also for resolving unexpected
    conflicts).
    """
    # acquire the locks up front and release them no matter what happens
    state = histeditstate(repo)
    try:
        state.wlock = repo.wlock()
        state.lock = repo.lock()
        _histedit(ui, repo, state, *freeargs, **opts)
    finally:
        release(state.lock, state.wlock)
986 goalcontinue = 'continue'
986 goalcontinue = 'continue'
987 goalabort = 'abort'
987 goalabort = 'abort'
988 goaleditplan = 'edit-plan'
988 goaleditplan = 'edit-plan'
989 goalnew = 'new'
989 goalnew = 'new'
990
990
991 def _getgoal(opts):
991 def _getgoal(opts):
992 if opts.get('continue'):
992 if opts.get('continue'):
993 return goalcontinue
993 return goalcontinue
994 if opts.get('abort'):
994 if opts.get('abort'):
995 return goalabort
995 return goalabort
996 if opts.get('edit_plan'):
996 if opts.get('edit_plan'):
997 return goaleditplan
997 return goaleditplan
998 return goalnew
998 return goalnew
999
999
1000 def _readfile(path):
1000 def _readfile(path):
1001 if path == '-':
1001 if path == '-':
1002 return sys.stdin.read()
1002 return sys.stdin.read()
1003 else:
1003 else:
1004 with open(path, 'rb') as f:
1004 with open(path, 'rb') as f:
1005 return f.read()
1005 return f.read()
1006
1006
def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
    """Check the option/argument combination for the requested goal.

    Raises error.Abort on any incompatible or missing arguments. As a
    side effect, in the 'new' non-outgoing case the positional freeargs
    are folded into *revs* and a default revision may be appended.
    """
    # TODO only abort if we try to histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    if force and not outg:
        raise error.Abort(_('--force only allowed with --outgoing'))
    if goal == 'continue':
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --continue'))
    elif goal == 'abort':
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --abort'))
    elif goal == 'edit-plan':
        if any((outg, revs, freeargs)):
            raise error.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
    else:
        # goal == 'new': refuse to start over an unfinished session
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise error.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise error.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise error.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                # fall back to the configured default revset
                defaultrev = destutil.desthistedit(ui, repo)
                if defaultrev is not None:
                    revs.append(defaultrev)

            if len(revs) != 1:
                raise error.Abort(
                    _('histedit requires exactly one ancestor revision'))
def _histedit(ui, repo, state, *freeargs, **opts):
    """Dispatch a histedit invocation to the handler for its goal."""
    goal = _getgoal(opts)
    revs = opts.get('rev', [])
    rules = opts.get('commands', '')
    state.keep = opts.get('keep', False)

    _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)

    # rebuild state
    if goal == goalcontinue:
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == goaleditplan:
        _edithisteditplan(ui, repo, state, rules)
        return
    elif goal == goalabort:
        _aborthistedit(ui, repo, state)
        return
    else:
        # goal == goalnew
        _newhistedit(ui, repo, state, revs, freeargs, opts)

    # 'continue' and 'new' both fall through to run remaining actions
    _continuehistedit(ui, repo, state)
    _finishhistedit(ui, repo, state)
def _continuehistedit(ui, repo, state):
    """This function runs after either:
    - bootstrapcontinue (if the goal is 'continue')
    - _newhistedit (if the goal is 'new')
    """
    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    actions = state.actions[:]
    for idx, (action, nextact) in enumerate(
            zip(actions, actions[1:] + [None])):
        if action.verb == 'fold' and nextact and nextact.verb == 'fold':
            # every fold followed by another fold becomes a silent
            # _multifold; only the last fold of the run prompts
            state.actions[idx].__class__ = _multifold

    total = len(state.actions)
    pos = 0
    while state.actions:
        # persist before running so an interruption can be resumed
        state.write()
        actobj = state.actions.pop(0)
        pos += 1
        ui.progress(_("editing"), pos, actobj.torule(),
                    _('changes'), total)
        ui.debug('histedit: processing %s %s\n' % (actobj.verb,
                                                   actobj.torule()))
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
        state.write()
    ui.progress(_("editing"), None)
def _finishhistedit(ui, repo, state):
    """This action runs when histedit is finishing its session"""
    repo.ui.pushbuffer()
    hg.update(repo, state.parentctxnode, quietempty=True)
    repo.ui.popbuffer()

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
    if supportsmarkers:
        # Only create markers if the temp nodes weren't already removed.
        obsolete.createmarkers(repo, ((repo[t], ()) for t in sorted(tmpnodes)
                                      if t in repo))
    else:
        cleanupnode(ui, repo, 'temp', tmpnodes)

    if not state.keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if supportsmarkers:
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    # drop session files so a later histedit starts fresh
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
    if repo.vfs.exists('histedit-last-edit.txt'):
        repo.vfs.unlink('histedit-last-edit.txt')
def _aborthistedit(ui, repo, state):
    """Abort an in-progress histedit, restoring the pre-edit state."""
    try:
        state.read()
        __, leafs, tmpnodes, __ = processreplacement(state)
        ui.debug('restore wc to old parent %s\n'
                 % node.short(state.topmost))

        # Recover our old commits if necessary
        if state.topmost not in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            with repo.transaction('histedit.abort') as tr:
                if not isinstance(gen, bundle2.unbundle20):
                    gen.apply(repo, 'histedit', 'bundle:' + backupfile)
                if isinstance(gen, bundle2.unbundle20):
                    bundle2.applybundle(repo, gen, tr,
                                        source='histedit',
                                        url='bundle:' + backupfile)

            os.remove(backupfile)

        # check whether we should update away
        if repo.unfiltered().revs('parents() and (%n or %ln::)',
                                  state.parentctxnode, leafs | tmpnodes):
            hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
    except Exception:
        if state.inprogress():
            ui.warn(_('warning: encountered an exception during histedit '
                      '--abort; the repository may not have been completely '
                      'cleaned up\n'))
        raise
    finally:
        state.clear()
def _edithisteditplan(ui, repo, state, rules):
    """Re-edit the plan of an in-progress histedit.

    ``rules`` is the value of --commands: when empty, the interactive rule
    editor is opened pre-seeded with the current actions; otherwise it is
    handed to ``_readfile`` (presumably a file name — TODO confirm against
    ``_readfile``).  The re-parsed and verified actions are stored back
    into ``state`` and written to disk.
    """
    state.read()
    if not rules:
        comment = geteditcomment(ui,
                                 node.short(state.parentctxnode),
                                 node.short(state.topmost))
        rules = ruleeditor(repo, ui, state.actions, comment)
    else:
        rules = _readfile(rules)
    actions = parserules(rules, state)
    # verify the new plan against the changesets referenced by the
    # *existing* plan (note: no backslash needed inside brackets)
    ctxs = [repo[act.nodetoverify()]
            for act in state.actions if act.nodetoverify()]
    warnverifyactions(ui, repo, actions, state, ctxs)
    state.actions = actions
    state.write()
1207
1207
def _newhistedit(ui, repo, state, revs, freeargs, opts):
    """Start a brand-new histedit.

    Determines the stack of revisions to edit (either --outgoing or the
    user-supplied revset), collects the rule list, and populates ``state``
    with the parent node, actions, topmost node and (when obsolescence
    markers are unavailable) a backup bundle used by --abort.
    """
    outg = opts.get('outgoing')
    rules = opts.get('commands', '')
    force = opts.get('force')

    # refuse to start while another operation is unfinished or the
    # working copy has uncommitted changes
    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    topmost, empty = repo.dirstate.parents()
    if outg:
        # --outgoing: edit the changesets not present in the given remote
        if freeargs:
            remote = freeargs[0]
        else:
            remote = None
        root = findoutgoing(ui, repo, remote, force, opts)
    else:
        # the requested revisions must form a stack with a single root
        rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.Abort(_('The specified revisions must have '
                'exactly one common root'))
        root = rr[0].node()

    revs = between(repo, root, topmost, state.keep)
    if not revs:
        raise error.Abort(_('%s is not an ancestor of working directory') %
                          node.short(root))

    ctxs = [repo[r] for r in revs]
    if not rules:
        # no --commands given: open the interactive plan editor, with
        # every changeset defaulting to 'pick'
        comment = geteditcomment(ui, node.short(root), node.short(topmost))
        actions = [pick(state, r) for r in revs]
        rules = ruleeditor(repo, ui, actions, comment)
    else:
        rules = _readfile(rules)
    actions = parserules(rules, state)
    warnverifyactions(ui, repo, actions, state, ctxs)

    parentctxnode = repo[root].parents()[0].node()

    state.parentctxnode = parentctxnode
    state.actions = actions
    state.topmost = topmost
    state.replacements = []

    # Create a backup so we can always abort completely.
    backupfile = None
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                    'histedit')
    state.backupfile = backupfile
1258
1258
def _getsummary(ctx):
    """Return the first line of ctx's description, or '' when empty."""
    description = ctx.description()
    if not description:
        return ''
    return description.splitlines()[0]
1266
1266
def bootstrapcontinue(ui, state, opts):
    """Resume the histedit plan after an interruption (--continue).

    Pops the first pending action off ``state.actions``, lets it finish,
    and records the resulting parent node and replacements back into
    ``state``.  Returns the (mutated) state.
    """
    repo = state.repo
    if state.actions:
        actobj = state.actions.pop(0)

        if _isdirtywc(repo):
            actobj.continuedirty()
            # continuedirty() is expected to leave the working copy clean;
            # if it is still dirty afterwards, give up
            if _isdirtywc(repo):
                abortdirty()

        parentctx, replacements = actobj.continueclean()

        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacements)

    return state
1283
1283
def between(repo, old, new, keep):
    """Select and validate the set of revisions to edit.

    Returns the nodes of ``old::new``.  When keep is false, the selected
    set may not have descendants outside the set (unless unstable commits
    are allowed), may not contain merges, and its root must be mutable.
    """
    ctxs = list(repo.set('%n::%n', old, new))
    if ctxs and not keep:
        if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and
            repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
            raise error.Abort(_('can only histedit a changeset together '
                                'with all its descendants'))
        if repo.revs('(%ld) and merge()', ctxs):
            raise error.Abort(_('cannot edit history that contains merges'))
        root = ctxs[0] # list is already sorted by repo.set
        if not root.mutable():
            raise error.Abort(_('cannot edit public changeset: %s') % root,
                              hint=_('see "hg help phases" for details'))
    return [c.node() for c in ctxs]
1301
1301
def ruleeditor(repo, ui, actions, editcomment=""):
    """open an editor to edit rules

    rules are in the format [ [act, ctx], ...] like in state.rules
    """
    if repo.ui.configbool("experimental", "histedit.autoverb"):
        # autoverb: a commit summary starting with e.g. "fold!" is turned
        # into that verb and re-attached after the commit whose summary
        # matches the remainder of the line
        newact = util.sortdict()
        for act in actions:
            ctx = repo[act.node]
            summary = _getsummary(ctx)
            fword = summary.split(' ', 1)[0].lower()
            added = False

            # if it doesn't end with the special character '!' just skip this
            if fword.endswith('!'):
                fword = fword[:-1]
                if fword in primaryactions | secondaryactions | tertiaryactions:
                    act.verb = fword
                    # get the target summary
                    tsum = summary[len(fword) + 1:].lstrip()
                    # safe but slow: reverse iterate over the actions so we
                    # don't clash on two commits having the same summary
                    for na, l in reversed(list(newact.iteritems())):
                        actx = repo[na.node]
                        asum = _getsummary(actx)
                        if asum == tsum:
                            added = True
                            l.append(act)
                            break

            if not added:
                newact[act] = []

        # copy over and flatten the new list
        actions = []
        for na, l in newact.iteritems():
            actions.append(na)
            actions += l

    rules = '\n'.join([act.torule() for act in actions])
    rules += '\n\n'
    rules += editcomment
    rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'})

    # Save edit rules in .hg/histedit-last-edit.txt in case
    # the user needs to ask for help after something
    # surprising happens.
    # Use a context manager so the file handle is not leaked if the
    # write fails (the original open()/close() pair skipped close() on
    # exception).
    with open(repo.join('histedit-last-edit.txt'), 'w') as f:
        f.write(rules)

    return rules
1354
1354
def parserules(rules, state):
    """Parse the histedit rules text into a list of action objects."""
    actions = []
    for rawline in rules.splitlines():
        line = rawline.strip()
        # ignore blank lines and comment lines
        if not line or line.startswith('#'):
            continue
        if ' ' not in line:
            raise error.ParseError(_('malformed line "%s"') % line)
        verb, rest = line.split(' ', 1)

        if verb not in actiontable:
            raise error.ParseError(_('unknown action "%s"') % verb)

        actions.append(actiontable[verb].fromrule(state, rest))
    return actions
1371
1371
def warnverifyactions(ui, repo, actions, state, ctxs):
    """Run verifyactions(), warning about the saved rules file on failure.

    If verification raises ParseError and a copy of the rules was saved by
    ruleeditor(), point the user at .hg/histedit-last-edit.txt before
    re-raising.
    """
    try:
        verifyactions(actions, state, ctxs)
    except error.ParseError:
        if repo.vfs.exists('histedit-last-edit.txt'):
            ui.warn(_('warning: histedit rules saved '
                      'to: .hg/histedit-last-edit.txt\n'))
        raise
1380
1380
def verifyactions(actions, state, ctxs):
    """Verify that there exists exactly one action per given changeset and
    other constraints.

    Will abort if there are too many or too few rules, a malformed rule,
    or a rule on a changeset outside of the user-given range.
    """
    expected = set(c.hex() for c in ctxs)
    seen = set()
    prev = None
    for action in actions:
        action.verify(prev)
        prev = action
        constraints = action.constraints()
        for constraint in constraints:
            if constraint not in _constraints.known():
                raise error.ParseError(_('unknown constraint "%s"') %
                                       constraint)

        nodetoverify = action.nodetoverify()
        if nodetoverify is not None:
            ha = node.hex(nodetoverify)
            # noother: the changeset must be one of those being edited
            if _constraints.noother in constraints and ha not in expected:
                raise error.ParseError(
                    _('%s "%s" changeset was not a candidate')
                    % (action.verb, ha[:12]),
                    hint=_('only use listed changesets'))
            # forceother: the changeset must NOT be one of those being edited
            if _constraints.forceother in constraints and ha in expected:
                raise error.ParseError(
                    _('%s "%s" changeset was not an edited list candidate')
                    % (action.verb, ha[:12]),
                    hint=_('only use listed changesets'))
            if _constraints.noduplicates in constraints and ha in seen:
                raise error.ParseError(_(
                    'duplicated command for changeset %s') %
                    ha[:12])
            seen.add(ha)
    missing = sorted(expected - seen) # sort to stabilize output

    if state.repo.ui.configbool('histedit', 'dropmissing'):
        if len(actions) == 0:
            raise error.ParseError(_('no rules provided'),
                    hint=_('use strip extension to remove commits'))

        drops = [drop(state, node.bin(n)) for n in missing]
        # put them at the beginning so they execute immediately and
        # don't show in the edit-plan in the future
        actions[:0] = drops
    elif missing:
        raise error.ParseError(_('missing rules for changeset %s') %
                missing[0][:12],
                hint=_('use "drop %s" to discard, see also: '
                       '"hg help -e histedit.config"') % missing[0][:12])
1434
1434
def adjustreplacementsfrommarkers(repo, oldreplacements):
    """Adjust replacements from obsolescence markers

    Replacements structure is originally generated based on
    histedit's state and does not account for changes that are
    not recorded there. This function fixes that by adding
    data read from obsolescence markers"""
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return oldreplacements

    unfi = repo.unfiltered()
    nm = unfi.changelog.nodemap
    obsstore = repo.obsstore
    newreplacements = list(oldreplacements)
    oldsuccs = [r[1] for r in oldreplacements]
    # successors that have already been added to succstocheck once
    seensuccs = set().union(*oldsuccs) # create a set from an iterable of tuples
    succstocheck = list(seensuccs)
    while succstocheck:
        n = succstocheck.pop()
        # "missing" means the node is not in the (unfiltered) changelog
        missing = nm.get(n) is None
        markers = obsstore.successors.get(n, ())
        if missing and not markers:
            # dead end, mark it as such
            newreplacements.append((n, ()))
        for marker in markers:
            nsuccs = marker[1]
            newreplacements.append((n, nsuccs))
            # walk transitively through successors not seen before
            for nsucc in nsuccs:
                if nsucc not in seensuccs:
                    seensuccs.add(nsucc)
                    succstocheck.append(nsucc)

    return newreplacements
1469
1469
def processreplacement(state):
    """process the list of replacements to return

    1) the final mapping between original and created nodes
    2) the list of temporary node created by histedit
    3) the list of new commit created by histedit"""
    replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
    allsuccs = set()
    replaced = set()
    fullmapping = {}
    # initialize basic set
    # fullmapping records all operations recorded in replacement
    for rep in replacements:
        allsuccs.update(rep[1])
        replaced.add(rep[0])
        fullmapping.setdefault(rep[0], set()).update(rep[1])
    # "new" nodes were only ever created; "tmpnodes" were created and then
    # themselves replaced during the edit
    new = allsuccs - replaced
    tmpnodes = allsuccs & replaced
    # Reduce content fullmapping into direct relation between original nodes
    # and final node created during history edition
    # Dropped changeset are replaced by an empty list
    toproceed = set(fullmapping)
    final = {}
    while toproceed:
        for x in list(toproceed):
            succs = fullmapping[x]
            for s in list(succs):
                if s in toproceed:
                    # non final node with unknown closure
                    # We can't process this now
                    break
                elif s in final:
                    # non final node, replace with closure
                    succs.remove(s)
                    succs.update(final[s])
            # NOTE: this is a for/else — it runs only when the inner loop
            # completed without `break`, i.e. every successor is final
            else:
                final[x] = succs
                toproceed.remove(x)
    # remove tmpnodes from final mapping
    for n in tmpnodes:
        del final[n]
    # we expect all changes involved in final to exist in the repo
    # turn `final` into list (topologically sorted)
    nm = state.repo.changelog.nodemap
    for prec, succs in final.items():
        final[prec] = sorted(succs, key=nm.get)

    # computed topmost element (necessary for bookmark)
    if new:
        newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
    elif not final:
        # Nothing rewritten at all. we won't need `newtopmost`
        # It is the same as `oldtopmost` and `processreplacement` know it
        newtopmost = None
    else:
        # every body died. The newtopmost is the parent of the root.
        r = state.repo.changelog.rev
        newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()

    return final, tmpnodes, new, newtopmost
1530
1530
def movebookmarks(ui, repo, mapping, oldtopmost, newtopmost):
    """Move bookmark from old to newly created node"""
    if not mapping:
        # if nothing got rewritten there is no purpose for this function
        return
    moves = []
    for bk, old in sorted(repo._bookmarks.iteritems()):
        if old == oldtopmost:
            # special case ensure bookmark stay on tip.
            #
            # This is arguably a feature and we may only want that for the
            # active bookmark. But the behavior is kept compatible with the old
            # version for now.
            moves.append((bk, newtopmost))
            continue
        base = old
        new = mapping.get(base, None)
        if new is None:
            # bookmark is not on a rewritten changeset: nothing to move
            continue
        while not new:
            # base is killed, trying with parent
            base = repo[base].p1().node()
            new = mapping.get(base, (base,))
            # nothing to move
        moves.append((bk, new[-1]))
    if moves:
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('histedit')
            marks = repo._bookmarks
            for mark, new in moves:
                old = marks[mark]
                ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
                        % (mark, node.short(old), node.short(new)))
                marks[mark] = new
            marks.recordchange(tr)
            tr.close()
        finally:
            release(tr, lock)
1571
1571
def cleanupnode(ui, repo, name, nodes):
    """strip a group of nodes from the repository

    The set of nodes to strip may contain unknown nodes."""
    ui.debug('should strip %s nodes %s\n' %
             (name, ', '.join([node.short(n) for n in nodes])))
    with repo.lock():
        # do not let filtering get in the way of the cleanse
        # we should probably get rid of obsolescence marker created during the
        # histedit, but we currently do not have such information.
        repo = repo.unfiltered()
        # Find all nodes that need to be stripped
        # (we use %lr instead of %ln to silently ignore unknown items)
        nm = repo.changelog.nodemap
        nodes = sorted(n for n in nodes if n in nm)
        roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
        for c in roots:
            # We should process nodes in reverse order to strip tip most first,
            # but this triggers a bug in changegroup hook.
            # This would reduce bundle overhead
            repair.strip(ui, repo, c)
1593
1593
def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
    """Refuse to strip changesets referenced by an in-progress histedit."""
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    statefile = os.path.join(repo.path, 'histedit-state')
    if os.path.exists(statefile):
        state = histeditstate(repo)
        state.read()
        histedit_nodes = set()
        for action in state.actions:
            nodetoverify = action.nodetoverify()
            if nodetoverify:
                histedit_nodes.add(nodetoverify)
        strip_nodes = set(repo[n].node() for n in nodelist)
        common_nodes = histedit_nodes & strip_nodes
        if common_nodes:
            raise error.Abort(_("histedit in progress, can't strip %s")
                              % ', '.join(node.short(x) for x in common_nodes))
    return orig(ui, repo, nodelist, *args, **kwargs)
1608
1608
# block 'strip' from removing changesets that are part of an in-progress
# histedit (see stripwrapper above)
extensions.wrapfunction(repair, 'strip', stripwrapper)
1610
1610
def summaryhook(ui, repo):
    """Add a 'hist:' line to `hg summary` while a histedit is in progress."""
    if not os.path.exists(repo.join('histedit-state')):
        return
    state = histeditstate(repo)
    state.read()
    if not state.actions:
        return
    remaining = (ui.label(_('%d remaining'), 'histedit.remaining')
                 % len(state.actions))
    # i18n: column positioning for "hg summary"
    ui.write(_('hist: %s (histedit --continue)\n') % remaining)
1621
1621
def extsetup(ui):
    """Register histedit's summary hook and unfinished-state entries."""
    cmdutil.summaryhooks.add('histedit', summaryhook)
    cmdutil.unfinishedstates.append(
        ['histedit-state', False, True, _('histedit in progress'),
         _("use 'hg histedit --continue' or 'hg histedit --abort'")])
    cmdutil.afterresolvedstates.append(
        ['histedit-state', _('hg histedit --continue')])
    if ui.configbool("experimental", "histeditng"):
        # expose the experimental 'base' action only when opted in
        globals()['base'] = action(['base', 'b'],
            _('checkout changeset and apply further changesets from there')
        )(base)
@@ -1,491 +1,491 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Track previous positions of bookmarks (EXPERIMENTAL)
7 """Track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 commands,
26 commands,
27 dispatch,
27 dispatch,
28 error,
28 error,
29 extensions,
29 extensions,
30 hg,
30 hg,
31 localrepo,
31 localrepo,
32 lock,
32 lock,
33 node,
33 node,
34 util,
34 util,
35 )
35 )
36
36
37 from . import share
37 from . import share
38
38
39 cmdtable = {}
39 cmdtable = {}
40 command = cmdutil.command(cmdtable)
40 command = cmdutil.command(cmdtable)
41
41
42 # Note for extension authors: ONLY specify testedwith = 'internal' for
42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 # be specifying the version(s) of Mercurial they are tested with, or
44 # be specifying the version(s) of Mercurial they are tested with, or
45 # leave the attribute unspecified.
45 # leave the attribute unspecified.
46 testedwith = 'internal'
46 testedwith = 'ships-with-hg-core'
47
47
48 # storage format version; increment when the format changes
48 # storage format version; increment when the format changes
49 storageversion = 0
49 storageversion = 0
50
50
51 # namespaces
51 # namespaces
52 bookmarktype = 'bookmark'
52 bookmarktype = 'bookmark'
53 wdirparenttype = 'wdirparent'
53 wdirparenttype = 'wdirparent'
54 # In a shared repository, what shared feature name is used
54 # In a shared repository, what shared feature name is used
55 # to indicate this namespace is shared with the source?
55 # to indicate this namespace is shared with the source?
56 sharednamespaces = {
56 sharednamespaces = {
57 bookmarktype: hg.sharedbookmarks,
57 bookmarktype: hg.sharedbookmarks,
58 }
58 }
59
59
60 # Journal recording, register hooks and storage object
60 # Journal recording, register hooks and storage object
61 def extsetup(ui):
61 def extsetup(ui):
62 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
62 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
63 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
63 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
64 extensions.wrapfunction(
64 extensions.wrapfunction(
65 localrepo.localrepository.dirstate, 'func', wrapdirstate)
65 localrepo.localrepository.dirstate, 'func', wrapdirstate)
66 extensions.wrapfunction(hg, 'postshare', wrappostshare)
66 extensions.wrapfunction(hg, 'postshare', wrappostshare)
67 extensions.wrapfunction(hg, 'copystore', unsharejournal)
67 extensions.wrapfunction(hg, 'copystore', unsharejournal)
68
68
69 def reposetup(ui, repo):
69 def reposetup(ui, repo):
70 if repo.local():
70 if repo.local():
71 repo.journal = journalstorage(repo)
71 repo.journal = journalstorage(repo)
72
72
73 def runcommand(orig, lui, repo, cmd, fullargs, *args):
73 def runcommand(orig, lui, repo, cmd, fullargs, *args):
74 """Track the command line options for recording in the journal"""
74 """Track the command line options for recording in the journal"""
75 journalstorage.recordcommand(*fullargs)
75 journalstorage.recordcommand(*fullargs)
76 return orig(lui, repo, cmd, fullargs, *args)
76 return orig(lui, repo, cmd, fullargs, *args)
77
77
78 # hooks to record dirstate changes
78 # hooks to record dirstate changes
79 def wrapdirstate(orig, repo):
79 def wrapdirstate(orig, repo):
80 """Make journal storage available to the dirstate object"""
80 """Make journal storage available to the dirstate object"""
81 dirstate = orig(repo)
81 dirstate = orig(repo)
82 if util.safehasattr(repo, 'journal'):
82 if util.safehasattr(repo, 'journal'):
83 dirstate.journalstorage = repo.journal
83 dirstate.journalstorage = repo.journal
84 dirstate.addparentchangecallback('journal', recorddirstateparents)
84 dirstate.addparentchangecallback('journal', recorddirstateparents)
85 return dirstate
85 return dirstate
86
86
87 def recorddirstateparents(dirstate, old, new):
87 def recorddirstateparents(dirstate, old, new):
88 """Records all dirstate parent changes in the journal."""
88 """Records all dirstate parent changes in the journal."""
89 old = list(old)
89 old = list(old)
90 new = list(new)
90 new = list(new)
91 if util.safehasattr(dirstate, 'journalstorage'):
91 if util.safehasattr(dirstate, 'journalstorage'):
92 # only record two hashes if there was a merge
92 # only record two hashes if there was a merge
93 oldhashes = old[:1] if old[1] == node.nullid else old
93 oldhashes = old[:1] if old[1] == node.nullid else old
94 newhashes = new[:1] if new[1] == node.nullid else new
94 newhashes = new[:1] if new[1] == node.nullid else new
95 dirstate.journalstorage.record(
95 dirstate.journalstorage.record(
96 wdirparenttype, '.', oldhashes, newhashes)
96 wdirparenttype, '.', oldhashes, newhashes)
97
97
98 # hooks to record bookmark changes (both local and remote)
98 # hooks to record bookmark changes (both local and remote)
99 def recordbookmarks(orig, store, fp):
99 def recordbookmarks(orig, store, fp):
100 """Records all bookmark changes in the journal."""
100 """Records all bookmark changes in the journal."""
101 repo = store._repo
101 repo = store._repo
102 if util.safehasattr(repo, 'journal'):
102 if util.safehasattr(repo, 'journal'):
103 oldmarks = bookmarks.bmstore(repo)
103 oldmarks = bookmarks.bmstore(repo)
104 for mark, value in store.iteritems():
104 for mark, value in store.iteritems():
105 oldvalue = oldmarks.get(mark, node.nullid)
105 oldvalue = oldmarks.get(mark, node.nullid)
106 if value != oldvalue:
106 if value != oldvalue:
107 repo.journal.record(bookmarktype, mark, oldvalue, value)
107 repo.journal.record(bookmarktype, mark, oldvalue, value)
108 return orig(store, fp)
108 return orig(store, fp)
109
109
110 # shared repository support
110 # shared repository support
111 def _readsharedfeatures(repo):
111 def _readsharedfeatures(repo):
112 """A set of shared features for this repository"""
112 """A set of shared features for this repository"""
113 try:
113 try:
114 return set(repo.vfs.read('shared').splitlines())
114 return set(repo.vfs.read('shared').splitlines())
115 except IOError as inst:
115 except IOError as inst:
116 if inst.errno != errno.ENOENT:
116 if inst.errno != errno.ENOENT:
117 raise
117 raise
118 return set()
118 return set()
119
119
120 def _mergeentriesiter(*iterables, **kwargs):
120 def _mergeentriesiter(*iterables, **kwargs):
121 """Given a set of sorted iterables, yield the next entry in merged order
121 """Given a set of sorted iterables, yield the next entry in merged order
122
122
123 Note that by default entries go from most recent to oldest.
123 Note that by default entries go from most recent to oldest.
124 """
124 """
125 order = kwargs.pop('order', max)
125 order = kwargs.pop('order', max)
126 iterables = [iter(it) for it in iterables]
126 iterables = [iter(it) for it in iterables]
127 # this tracks still active iterables; iterables are deleted as they are
127 # this tracks still active iterables; iterables are deleted as they are
128 # exhausted, which is why this is a dictionary and why each entry also
128 # exhausted, which is why this is a dictionary and why each entry also
129 # stores the key. Entries are mutable so we can store the next value each
129 # stores the key. Entries are mutable so we can store the next value each
130 # time.
130 # time.
131 iterable_map = {}
131 iterable_map = {}
132 for key, it in enumerate(iterables):
132 for key, it in enumerate(iterables):
133 try:
133 try:
134 iterable_map[key] = [next(it), key, it]
134 iterable_map[key] = [next(it), key, it]
135 except StopIteration:
135 except StopIteration:
136 # empty entry, can be ignored
136 # empty entry, can be ignored
137 pass
137 pass
138
138
139 while iterable_map:
139 while iterable_map:
140 value, key, it = order(iterable_map.itervalues())
140 value, key, it = order(iterable_map.itervalues())
141 yield value
141 yield value
142 try:
142 try:
143 iterable_map[key][0] = next(it)
143 iterable_map[key][0] = next(it)
144 except StopIteration:
144 except StopIteration:
145 # this iterable is empty, remove it from consideration
145 # this iterable is empty, remove it from consideration
146 del iterable_map[key]
146 del iterable_map[key]
147
147
148 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
148 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
149 """Mark this shared working copy as sharing journal information"""
149 """Mark this shared working copy as sharing journal information"""
150 with destrepo.wlock():
150 with destrepo.wlock():
151 orig(sourcerepo, destrepo, **kwargs)
151 orig(sourcerepo, destrepo, **kwargs)
152 with destrepo.vfs('shared', 'a') as fp:
152 with destrepo.vfs('shared', 'a') as fp:
153 fp.write('journal\n')
153 fp.write('journal\n')
154
154
155 def unsharejournal(orig, ui, repo, repopath):
155 def unsharejournal(orig, ui, repo, repopath):
156 """Copy shared journal entries into this repo when unsharing"""
156 """Copy shared journal entries into this repo when unsharing"""
157 if (repo.path == repopath and repo.shared() and
157 if (repo.path == repopath and repo.shared() and
158 util.safehasattr(repo, 'journal')):
158 util.safehasattr(repo, 'journal')):
159 sharedrepo = share._getsrcrepo(repo)
159 sharedrepo = share._getsrcrepo(repo)
160 sharedfeatures = _readsharedfeatures(repo)
160 sharedfeatures = _readsharedfeatures(repo)
161 if sharedrepo and sharedfeatures > set(['journal']):
161 if sharedrepo and sharedfeatures > set(['journal']):
162 # there is a shared repository and there are shared journal entries
162 # there is a shared repository and there are shared journal entries
163 # to copy. move shared date over from source to destination but
163 # to copy. move shared date over from source to destination but
164 # move the local file first
164 # move the local file first
165 if repo.vfs.exists('journal'):
165 if repo.vfs.exists('journal'):
166 journalpath = repo.join('journal')
166 journalpath = repo.join('journal')
167 util.rename(journalpath, journalpath + '.bak')
167 util.rename(journalpath, journalpath + '.bak')
168 storage = repo.journal
168 storage = repo.journal
169 local = storage._open(
169 local = storage._open(
170 repo.vfs, filename='journal.bak', _newestfirst=False)
170 repo.vfs, filename='journal.bak', _newestfirst=False)
171 shared = (
171 shared = (
172 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
172 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
173 if sharednamespaces.get(e.namespace) in sharedfeatures)
173 if sharednamespaces.get(e.namespace) in sharedfeatures)
174 for entry in _mergeentriesiter(local, shared, order=min):
174 for entry in _mergeentriesiter(local, shared, order=min):
175 storage._write(repo.vfs, entry)
175 storage._write(repo.vfs, entry)
176
176
177 return orig(ui, repo, repopath)
177 return orig(ui, repo, repopath)
178
178
179 class journalentry(collections.namedtuple(
179 class journalentry(collections.namedtuple(
180 'journalentry',
180 'journalentry',
181 'timestamp user command namespace name oldhashes newhashes')):
181 'timestamp user command namespace name oldhashes newhashes')):
182 """Individual journal entry
182 """Individual journal entry
183
183
184 * timestamp: a mercurial (time, timezone) tuple
184 * timestamp: a mercurial (time, timezone) tuple
185 * user: the username that ran the command
185 * user: the username that ran the command
186 * namespace: the entry namespace, an opaque string
186 * namespace: the entry namespace, an opaque string
187 * name: the name of the changed item, opaque string with meaning in the
187 * name: the name of the changed item, opaque string with meaning in the
188 namespace
188 namespace
189 * command: the hg command that triggered this record
189 * command: the hg command that triggered this record
190 * oldhashes: a tuple of one or more binary hashes for the old location
190 * oldhashes: a tuple of one or more binary hashes for the old location
191 * newhashes: a tuple of one or more binary hashes for the new location
191 * newhashes: a tuple of one or more binary hashes for the new location
192
192
193 Handles serialisation from and to the storage format. Fields are
193 Handles serialisation from and to the storage format. Fields are
194 separated by newlines, hashes are written out in hex separated by commas,
194 separated by newlines, hashes are written out in hex separated by commas,
195 timestamp and timezone are separated by a space.
195 timestamp and timezone are separated by a space.
196
196
197 """
197 """
198 @classmethod
198 @classmethod
199 def fromstorage(cls, line):
199 def fromstorage(cls, line):
200 (time, user, command, namespace, name,
200 (time, user, command, namespace, name,
201 oldhashes, newhashes) = line.split('\n')
201 oldhashes, newhashes) = line.split('\n')
202 timestamp, tz = time.split()
202 timestamp, tz = time.split()
203 timestamp, tz = float(timestamp), int(tz)
203 timestamp, tz = float(timestamp), int(tz)
204 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
204 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
205 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
205 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
206 return cls(
206 return cls(
207 (timestamp, tz), user, command, namespace, name,
207 (timestamp, tz), user, command, namespace, name,
208 oldhashes, newhashes)
208 oldhashes, newhashes)
209
209
210 def __str__(self):
210 def __str__(self):
211 """String representation for storage"""
211 """String representation for storage"""
212 time = ' '.join(map(str, self.timestamp))
212 time = ' '.join(map(str, self.timestamp))
213 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
213 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
214 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
214 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
215 return '\n'.join((
215 return '\n'.join((
216 time, self.user, self.command, self.namespace, self.name,
216 time, self.user, self.command, self.namespace, self.name,
217 oldhashes, newhashes))
217 oldhashes, newhashes))
218
218
219 class journalstorage(object):
219 class journalstorage(object):
220 """Storage for journal entries
220 """Storage for journal entries
221
221
222 Entries are divided over two files; one with entries that pertain to the
222 Entries are divided over two files; one with entries that pertain to the
223 local working copy *only*, and one with entries that are shared across
223 local working copy *only*, and one with entries that are shared across
224 multiple working copies when shared using the share extension.
224 multiple working copies when shared using the share extension.
225
225
226 Entries are stored with NUL bytes as separators. See the journalentry
226 Entries are stored with NUL bytes as separators. See the journalentry
227 class for the per-entry structure.
227 class for the per-entry structure.
228
228
229 The file format starts with an integer version, delimited by a NUL.
229 The file format starts with an integer version, delimited by a NUL.
230
230
231 This storage uses a dedicated lock; this makes it easier to avoid issues
231 This storage uses a dedicated lock; this makes it easier to avoid issues
232 with adding entries that added when the regular wlock is unlocked (e.g.
232 with adding entries that added when the regular wlock is unlocked (e.g.
233 the dirstate).
233 the dirstate).
234
234
235 """
235 """
236 _currentcommand = ()
236 _currentcommand = ()
237 _lockref = None
237 _lockref = None
238
238
239 def __init__(self, repo):
239 def __init__(self, repo):
240 self.user = util.getuser()
240 self.user = util.getuser()
241 self.ui = repo.ui
241 self.ui = repo.ui
242 self.vfs = repo.vfs
242 self.vfs = repo.vfs
243
243
244 # is this working copy using a shared storage?
244 # is this working copy using a shared storage?
245 self.sharedfeatures = self.sharedvfs = None
245 self.sharedfeatures = self.sharedvfs = None
246 if repo.shared():
246 if repo.shared():
247 features = _readsharedfeatures(repo)
247 features = _readsharedfeatures(repo)
248 sharedrepo = share._getsrcrepo(repo)
248 sharedrepo = share._getsrcrepo(repo)
249 if sharedrepo is not None and 'journal' in features:
249 if sharedrepo is not None and 'journal' in features:
250 self.sharedvfs = sharedrepo.vfs
250 self.sharedvfs = sharedrepo.vfs
251 self.sharedfeatures = features
251 self.sharedfeatures = features
252
252
253 # track the current command for recording in journal entries
253 # track the current command for recording in journal entries
254 @property
254 @property
255 def command(self):
255 def command(self):
256 commandstr = ' '.join(
256 commandstr = ' '.join(
257 map(util.shellquote, journalstorage._currentcommand))
257 map(util.shellquote, journalstorage._currentcommand))
258 if '\n' in commandstr:
258 if '\n' in commandstr:
259 # truncate multi-line commands
259 # truncate multi-line commands
260 commandstr = commandstr.partition('\n')[0] + ' ...'
260 commandstr = commandstr.partition('\n')[0] + ' ...'
261 return commandstr
261 return commandstr
262
262
263 @classmethod
263 @classmethod
264 def recordcommand(cls, *fullargs):
264 def recordcommand(cls, *fullargs):
265 """Set the current hg arguments, stored with recorded entries"""
265 """Set the current hg arguments, stored with recorded entries"""
266 # Set the current command on the class because we may have started
266 # Set the current command on the class because we may have started
267 # with a non-local repo (cloning for example).
267 # with a non-local repo (cloning for example).
268 cls._currentcommand = fullargs
268 cls._currentcommand = fullargs
269
269
270 def jlock(self, vfs):
270 def jlock(self, vfs):
271 """Create a lock for the journal file"""
271 """Create a lock for the journal file"""
272 if self._lockref and self._lockref():
272 if self._lockref and self._lockref():
273 raise error.Abort(_('journal lock does not support nesting'))
273 raise error.Abort(_('journal lock does not support nesting'))
274 desc = _('journal of %s') % vfs.base
274 desc = _('journal of %s') % vfs.base
275 try:
275 try:
276 l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
276 l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
277 except error.LockHeld as inst:
277 except error.LockHeld as inst:
278 self.ui.warn(
278 self.ui.warn(
279 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
279 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
280 # default to 600 seconds timeout
280 # default to 600 seconds timeout
281 l = lock.lock(
281 l = lock.lock(
282 vfs, 'journal.lock',
282 vfs, 'journal.lock',
283 int(self.ui.config("ui", "timeout", "600")), desc=desc)
283 int(self.ui.config("ui", "timeout", "600")), desc=desc)
284 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
284 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
285 self._lockref = weakref.ref(l)
285 self._lockref = weakref.ref(l)
286 return l
286 return l
287
287
288 def record(self, namespace, name, oldhashes, newhashes):
288 def record(self, namespace, name, oldhashes, newhashes):
289 """Record a new journal entry
289 """Record a new journal entry
290
290
291 * namespace: an opaque string; this can be used to filter on the type
291 * namespace: an opaque string; this can be used to filter on the type
292 of recorded entries.
292 of recorded entries.
293 * name: the name defining this entry; for bookmarks, this is the
293 * name: the name defining this entry; for bookmarks, this is the
294 bookmark name. Can be filtered on when retrieving entries.
294 bookmark name. Can be filtered on when retrieving entries.
295 * oldhashes and newhashes: each a single binary hash, or a list of
295 * oldhashes and newhashes: each a single binary hash, or a list of
296 binary hashes. These represent the old and new position of the named
296 binary hashes. These represent the old and new position of the named
297 item.
297 item.
298
298
299 """
299 """
300 if not isinstance(oldhashes, list):
300 if not isinstance(oldhashes, list):
301 oldhashes = [oldhashes]
301 oldhashes = [oldhashes]
302 if not isinstance(newhashes, list):
302 if not isinstance(newhashes, list):
303 newhashes = [newhashes]
303 newhashes = [newhashes]
304
304
305 entry = journalentry(
305 entry = journalentry(
306 util.makedate(), self.user, self.command, namespace, name,
306 util.makedate(), self.user, self.command, namespace, name,
307 oldhashes, newhashes)
307 oldhashes, newhashes)
308
308
309 vfs = self.vfs
309 vfs = self.vfs
310 if self.sharedvfs is not None:
310 if self.sharedvfs is not None:
311 # write to the shared repository if this feature is being
311 # write to the shared repository if this feature is being
312 # shared between working copies.
312 # shared between working copies.
313 if sharednamespaces.get(namespace) in self.sharedfeatures:
313 if sharednamespaces.get(namespace) in self.sharedfeatures:
314 vfs = self.sharedvfs
314 vfs = self.sharedvfs
315
315
316 self._write(vfs, entry)
316 self._write(vfs, entry)
317
317
318 def _write(self, vfs, entry):
318 def _write(self, vfs, entry):
319 with self.jlock(vfs):
319 with self.jlock(vfs):
320 version = None
320 version = None
321 # open file in amend mode to ensure it is created if missing
321 # open file in amend mode to ensure it is created if missing
322 with vfs('journal', mode='a+b', atomictemp=True) as f:
322 with vfs('journal', mode='a+b', atomictemp=True) as f:
323 f.seek(0, os.SEEK_SET)
323 f.seek(0, os.SEEK_SET)
324 # Read just enough bytes to get a version number (up to 2
324 # Read just enough bytes to get a version number (up to 2
325 # digits plus separator)
325 # digits plus separator)
326 version = f.read(3).partition('\0')[0]
326 version = f.read(3).partition('\0')[0]
327 if version and version != str(storageversion):
327 if version and version != str(storageversion):
328 # different version of the storage. Exit early (and not
328 # different version of the storage. Exit early (and not
329 # write anything) if this is not a version we can handle or
329 # write anything) if this is not a version we can handle or
330 # the file is corrupt. In future, perhaps rotate the file
330 # the file is corrupt. In future, perhaps rotate the file
331 # instead?
331 # instead?
332 self.ui.warn(
332 self.ui.warn(
333 _("unsupported journal file version '%s'\n") % version)
333 _("unsupported journal file version '%s'\n") % version)
334 return
334 return
335 if not version:
335 if not version:
336 # empty file, write version first
336 # empty file, write version first
337 f.write(str(storageversion) + '\0')
337 f.write(str(storageversion) + '\0')
338 f.seek(0, os.SEEK_END)
338 f.seek(0, os.SEEK_END)
339 f.write(str(entry) + '\0')
339 f.write(str(entry) + '\0')
340
340
341 def filtered(self, namespace=None, name=None):
341 def filtered(self, namespace=None, name=None):
342 """Yield all journal entries with the given namespace or name
342 """Yield all journal entries with the given namespace or name
343
343
344 Both the namespace and the name are optional; if neither is given all
344 Both the namespace and the name are optional; if neither is given all
345 entries in the journal are produced.
345 entries in the journal are produced.
346
346
347 Matching supports regular expressions by using the `re:` prefix
347 Matching supports regular expressions by using the `re:` prefix
348 (use `literal:` to match names or namespaces that start with `re:`)
348 (use `literal:` to match names or namespaces that start with `re:`)
349
349
350 """
350 """
351 if namespace is not None:
351 if namespace is not None:
352 namespace = util.stringmatcher(namespace)[-1]
352 namespace = util.stringmatcher(namespace)[-1]
353 if name is not None:
353 if name is not None:
354 name = util.stringmatcher(name)[-1]
354 name = util.stringmatcher(name)[-1]
355 for entry in self:
355 for entry in self:
356 if namespace is not None and not namespace(entry.namespace):
356 if namespace is not None and not namespace(entry.namespace):
357 continue
357 continue
358 if name is not None and not name(entry.name):
358 if name is not None and not name(entry.name):
359 continue
359 continue
360 yield entry
360 yield entry
361
361
362 def __iter__(self):
362 def __iter__(self):
363 """Iterate over the storage
363 """Iterate over the storage
364
364
365 Yields journalentry instances for each contained journal record.
365 Yields journalentry instances for each contained journal record.
366
366
367 """
367 """
368 local = self._open(self.vfs)
368 local = self._open(self.vfs)
369
369
370 if self.sharedvfs is None:
370 if self.sharedvfs is None:
371 return local
371 return local
372
372
373 # iterate over both local and shared entries, but only those
373 # iterate over both local and shared entries, but only those
374 # shared entries that are among the currently shared features
374 # shared entries that are among the currently shared features
375 shared = (
375 shared = (
376 e for e in self._open(self.sharedvfs)
376 e for e in self._open(self.sharedvfs)
377 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
377 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
378 return _mergeentriesiter(local, shared)
378 return _mergeentriesiter(local, shared)
379
379
380 def _open(self, vfs, filename='journal', _newestfirst=True):
380 def _open(self, vfs, filename='journal', _newestfirst=True):
381 if not vfs.exists(filename):
381 if not vfs.exists(filename):
382 return
382 return
383
383
384 with vfs(filename) as f:
384 with vfs(filename) as f:
385 raw = f.read()
385 raw = f.read()
386
386
387 lines = raw.split('\0')
387 lines = raw.split('\0')
388 version = lines and lines[0]
388 version = lines and lines[0]
389 if version != str(storageversion):
389 if version != str(storageversion):
390 version = version or _('not available')
390 version = version or _('not available')
391 raise error.Abort(_("unknown journal file version '%s'") % version)
391 raise error.Abort(_("unknown journal file version '%s'") % version)
392
392
393 # Skip the first line, it's a version number. Normally we iterate over
393 # Skip the first line, it's a version number. Normally we iterate over
394 # these in reverse order to list newest first; only when copying across
394 # these in reverse order to list newest first; only when copying across
395 # a shared storage do we forgo reversing.
395 # a shared storage do we forgo reversing.
396 lines = lines[1:]
396 lines = lines[1:]
397 if _newestfirst:
397 if _newestfirst:
398 lines = reversed(lines)
398 lines = reversed(lines)
399 for line in lines:
399 for line in lines:
400 if not line:
400 if not line:
401 continue
401 continue
402 yield journalentry.fromstorage(line)
402 yield journalentry.fromstorage(line)
403
403
404 # journal reading
404 # journal reading
405 # log options that don't make sense for journal
405 # log options that don't make sense for journal
406 _ignoreopts = ('no-merges', 'graph')
406 _ignoreopts = ('no-merges', 'graph')
407 @command(
407 @command(
408 'journal', [
408 'journal', [
409 ('', 'all', None, 'show history for all names'),
409 ('', 'all', None, 'show history for all names'),
410 ('c', 'commits', None, 'show commit metadata'),
410 ('c', 'commits', None, 'show commit metadata'),
411 ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
411 ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
412 '[OPTION]... [BOOKMARKNAME]')
412 '[OPTION]... [BOOKMARKNAME]')
413 def journal(ui, repo, *args, **opts):
413 def journal(ui, repo, *args, **opts):
414 """show the previous position of bookmarks and the working copy
414 """show the previous position of bookmarks and the working copy
415
415
416 The journal is used to see the previous commits that bookmarks and the
416 The journal is used to see the previous commits that bookmarks and the
417 working copy pointed to. By default the previous locations for the working
417 working copy pointed to. By default the previous locations for the working
418 copy. Passing a bookmark name will show all the previous positions of
418 copy. Passing a bookmark name will show all the previous positions of
419 that bookmark. Use the --all switch to show previous locations for all
419 that bookmark. Use the --all switch to show previous locations for all
420 bookmarks and the working copy; each line will then include the bookmark
420 bookmarks and the working copy; each line will then include the bookmark
421 name, or '.' for the working copy, as well.
421 name, or '.' for the working copy, as well.
422
422
423 If `name` starts with `re:`, the remainder of the name is treated as
423 If `name` starts with `re:`, the remainder of the name is treated as
424 a regular expression. To match a name that actually starts with `re:`,
424 a regular expression. To match a name that actually starts with `re:`,
425 use the prefix `literal:`.
425 use the prefix `literal:`.
426
426
427 By default hg journal only shows the commit hash and the command that was
427 By default hg journal only shows the commit hash and the command that was
428 running at that time. -v/--verbose will show the prior hash, the user, and
428 running at that time. -v/--verbose will show the prior hash, the user, and
429 the time at which it happened.
429 the time at which it happened.
430
430
431 Use -c/--commits to output log information on each commit hash; at this
431 Use -c/--commits to output log information on each commit hash; at this
432 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
432 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
433 switches to alter the log output for these.
433 switches to alter the log output for these.
434
434
435 `hg journal -T json` can be used to produce machine readable output.
435 `hg journal -T json` can be used to produce machine readable output.
436
436
437 """
437 """
438 name = '.'
438 name = '.'
439 if opts.get('all'):
439 if opts.get('all'):
440 if args:
440 if args:
441 raise error.Abort(
441 raise error.Abort(
442 _("You can't combine --all and filtering on a name"))
442 _("You can't combine --all and filtering on a name"))
443 name = None
443 name = None
444 if args:
444 if args:
445 name = args[0]
445 name = args[0]
446
446
447 fm = ui.formatter('journal', opts)
447 fm = ui.formatter('journal', opts)
448
448
449 if opts.get("template") != "json":
449 if opts.get("template") != "json":
450 if name is None:
450 if name is None:
451 displayname = _('the working copy and bookmarks')
451 displayname = _('the working copy and bookmarks')
452 else:
452 else:
453 displayname = "'%s'" % name
453 displayname = "'%s'" % name
454 ui.status(_("previous locations of %s:\n") % displayname)
454 ui.status(_("previous locations of %s:\n") % displayname)
455
455
456 limit = cmdutil.loglimit(opts)
456 limit = cmdutil.loglimit(opts)
457 entry = None
457 entry = None
458 for count, entry in enumerate(repo.journal.filtered(name=name)):
458 for count, entry in enumerate(repo.journal.filtered(name=name)):
459 if count == limit:
459 if count == limit:
460 break
460 break
461 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
461 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
462 name='node', sep=',')
462 name='node', sep=',')
463 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
463 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
464 name='node', sep=',')
464 name='node', sep=',')
465
465
466 fm.startitem()
466 fm.startitem()
467 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
467 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
468 fm.write('newhashes', '%s', newhashesstr)
468 fm.write('newhashes', '%s', newhashesstr)
469 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
469 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
470 fm.condwrite(
470 fm.condwrite(
471 opts.get('all') or name.startswith('re:'),
471 opts.get('all') or name.startswith('re:'),
472 'name', ' %-8s', entry.name)
472 'name', ' %-8s', entry.name)
473
473
474 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
474 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
475 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
475 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
476 fm.write('command', ' %s\n', entry.command)
476 fm.write('command', ' %s\n', entry.command)
477
477
478 if opts.get("commits"):
478 if opts.get("commits"):
479 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
479 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
480 for hash in entry.newhashes:
480 for hash in entry.newhashes:
481 try:
481 try:
482 ctx = repo[hash]
482 ctx = repo[hash]
483 displayer.show(ctx)
483 displayer.show(ctx)
484 except error.RepoLookupError as e:
484 except error.RepoLookupError as e:
485 fm.write('repolookuperror', "%s\n\n", str(e))
485 fm.write('repolookuperror', "%s\n\n", str(e))
486 displayer.close()
486 displayer.close()
487
487
488 fm.end()
488 fm.end()
489
489
490 if entry is None:
490 if entry is None:
491 ui.status(_("no recorded locations\n"))
491 ui.status(_("no recorded locations\n"))
@@ -1,758 +1,758 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a Distributed SCM
10 # Keyword expansion hack against the grain of a Distributed SCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56
56
57 The more specific you are in your filename patterns the less you
57 The more specific you are in your filename patterns the less you
58 lose speed in huge repositories.
58 lose speed in huge repositories.
59
59
60 For [keywordmaps] template mapping and expansion demonstration and
60 For [keywordmaps] template mapping and expansion demonstration and
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 available templates and filters.
62 available templates and filters.
63
63
64 Three additional date template filters are provided:
64 Three additional date template filters are provided:
65
65
66 :``utcdate``: "2006/09/18 15:13:13"
66 :``utcdate``: "2006/09/18 15:13:13"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
69
69
70 The default template mappings (view with :hg:`kwdemo -d`) can be
70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 replaced with customized keywords and templates. Again, run
71 replaced with customized keywords and templates. Again, run
72 :hg:`kwdemo` to control the results of your configuration changes.
72 :hg:`kwdemo` to control the results of your configuration changes.
73
73
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 to avoid storing expanded keywords in the change history.
75 to avoid storing expanded keywords in the change history.
76
76
77 To force expansion after enabling it, or a configuration change, run
77 To force expansion after enabling it, or a configuration change, run
78 :hg:`kwexpand`.
78 :hg:`kwexpand`.
79
79
80 Expansions spanning more than one line and incremental expansions,
80 Expansions spanning more than one line and incremental expansions,
81 like CVS' $Log$, are not supported. A keyword template map "Log =
81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 {desc}" expands to the first line of the changeset description.
82 {desc}" expands to the first line of the changeset description.
83 '''
83 '''
84
84
85
85
86 from __future__ import absolute_import
86 from __future__ import absolute_import
87
87
88 import os
88 import os
89 import re
89 import re
90 import tempfile
90 import tempfile
91
91
92 from mercurial.i18n import _
92 from mercurial.i18n import _
93 from mercurial.hgweb import webcommands
93 from mercurial.hgweb import webcommands
94
94
95 from mercurial import (
95 from mercurial import (
96 cmdutil,
96 cmdutil,
97 commands,
97 commands,
98 context,
98 context,
99 dispatch,
99 dispatch,
100 error,
100 error,
101 extensions,
101 extensions,
102 filelog,
102 filelog,
103 localrepo,
103 localrepo,
104 match,
104 match,
105 patch,
105 patch,
106 pathutil,
106 pathutil,
107 registrar,
107 registrar,
108 scmutil,
108 scmutil,
109 templatefilters,
109 templatefilters,
110 util,
110 util,
111 )
111 )
112
112
113 cmdtable = {}
113 cmdtable = {}
114 command = cmdutil.command(cmdtable)
114 command = cmdutil.command(cmdtable)
115 # Note for extension authors: ONLY specify testedwith = 'internal' for
115 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
116 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
116 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
117 # be specifying the version(s) of Mercurial they are tested with, or
117 # be specifying the version(s) of Mercurial they are tested with, or
118 # leave the attribute unspecified.
118 # leave the attribute unspecified.
119 testedwith = 'internal'
119 testedwith = 'ships-with-hg-core'
120
120
121 # hg commands that do not act on keywords
121 # hg commands that do not act on keywords
122 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
122 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
123 ' outgoing push tip verify convert email glog')
123 ' outgoing push tip verify convert email glog')
124
124
125 # hg commands that trigger expansion only when writing to working dir,
125 # hg commands that trigger expansion only when writing to working dir,
126 # not when reading filelog, and unexpand when reading from working dir
126 # not when reading filelog, and unexpand when reading from working dir
127 restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
127 restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
128 ' unshelve rebase graft backout histedit fetch')
128 ' unshelve rebase graft backout histedit fetch')
129
129
130 # names of extensions using dorecord
130 # names of extensions using dorecord
131 recordextensions = 'record'
131 recordextensions = 'record'
132
132
133 colortable = {
133 colortable = {
134 'kwfiles.enabled': 'green bold',
134 'kwfiles.enabled': 'green bold',
135 'kwfiles.deleted': 'cyan bold underline',
135 'kwfiles.deleted': 'cyan bold underline',
136 'kwfiles.enabledunknown': 'green',
136 'kwfiles.enabledunknown': 'green',
137 'kwfiles.ignored': 'bold',
137 'kwfiles.ignored': 'bold',
138 'kwfiles.ignoredunknown': 'none'
138 'kwfiles.ignoredunknown': 'none'
139 }
139 }
140
140
141 templatefilter = registrar.templatefilter()
141 templatefilter = registrar.templatefilter()
142
142
143 # date like in cvs' $Date
143 # date like in cvs' $Date
144 @templatefilter('utcdate')
144 @templatefilter('utcdate')
145 def utcdate(text):
145 def utcdate(text):
146 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
146 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
147 '''
147 '''
148 return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
148 return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
149 # date like in svn's $Date
149 # date like in svn's $Date
150 @templatefilter('svnisodate')
150 @templatefilter('svnisodate')
151 def svnisodate(text):
151 def svnisodate(text):
152 '''Date. Returns a date in this format: "2009-08-18 13:00:13
152 '''Date. Returns a date in this format: "2009-08-18 13:00:13
153 +0200 (Tue, 18 Aug 2009)".
153 +0200 (Tue, 18 Aug 2009)".
154 '''
154 '''
155 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
155 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
156 # date like in svn's $Id
156 # date like in svn's $Id
157 @templatefilter('svnutcdate')
157 @templatefilter('svnutcdate')
158 def svnutcdate(text):
158 def svnutcdate(text):
159 '''Date. Returns a UTC-date in this format: "2009-08-18
159 '''Date. Returns a UTC-date in this format: "2009-08-18
160 11:00:13Z".
160 11:00:13Z".
161 '''
161 '''
162 return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
162 return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
163
163
164 # make keyword tools accessible
164 # make keyword tools accessible
165 kwtools = {'templater': None, 'hgcmd': ''}
165 kwtools = {'templater': None, 'hgcmd': ''}
166
166
167 def _defaultkwmaps(ui):
167 def _defaultkwmaps(ui):
168 '''Returns default keywordmaps according to keywordset configuration.'''
168 '''Returns default keywordmaps according to keywordset configuration.'''
169 templates = {
169 templates = {
170 'Revision': '{node|short}',
170 'Revision': '{node|short}',
171 'Author': '{author|user}',
171 'Author': '{author|user}',
172 }
172 }
173 kwsets = ({
173 kwsets = ({
174 'Date': '{date|utcdate}',
174 'Date': '{date|utcdate}',
175 'RCSfile': '{file|basename},v',
175 'RCSfile': '{file|basename},v',
176 'RCSFile': '{file|basename},v', # kept for backwards compatibility
176 'RCSFile': '{file|basename},v', # kept for backwards compatibility
177 # with hg-keyword
177 # with hg-keyword
178 'Source': '{root}/{file},v',
178 'Source': '{root}/{file},v',
179 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
179 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
180 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
180 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
181 }, {
181 }, {
182 'Date': '{date|svnisodate}',
182 'Date': '{date|svnisodate}',
183 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
183 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
184 'LastChangedRevision': '{node|short}',
184 'LastChangedRevision': '{node|short}',
185 'LastChangedBy': '{author|user}',
185 'LastChangedBy': '{author|user}',
186 'LastChangedDate': '{date|svnisodate}',
186 'LastChangedDate': '{date|svnisodate}',
187 })
187 })
188 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
188 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
189 return templates
189 return templates
190
190
191 def _shrinktext(text, subfunc):
191 def _shrinktext(text, subfunc):
192 '''Helper for keyword expansion removal in text.
192 '''Helper for keyword expansion removal in text.
193 Depending on subfunc also returns number of substitutions.'''
193 Depending on subfunc also returns number of substitutions.'''
194 return subfunc(r'$\1$', text)
194 return subfunc(r'$\1$', text)
195
195
196 def _preselect(wstatus, changed):
196 def _preselect(wstatus, changed):
197 '''Retrieves modified and added files from a working directory state
197 '''Retrieves modified and added files from a working directory state
198 and returns the subset of each contained in given changed files
198 and returns the subset of each contained in given changed files
199 retrieved from a change context.'''
199 retrieved from a change context.'''
200 modified = [f for f in wstatus.modified if f in changed]
200 modified = [f for f in wstatus.modified if f in changed]
201 added = [f for f in wstatus.added if f in changed]
201 added = [f for f in wstatus.added if f in changed]
202 return modified, added
202 return modified, added
203
203
204
204
205 class kwtemplater(object):
205 class kwtemplater(object):
206 '''
206 '''
207 Sets up keyword templates, corresponding keyword regex, and
207 Sets up keyword templates, corresponding keyword regex, and
208 provides keyword substitution functions.
208 provides keyword substitution functions.
209 '''
209 '''
210
210
211 def __init__(self, ui, repo, inc, exc):
211 def __init__(self, ui, repo, inc, exc):
212 self.ui = ui
212 self.ui = ui
213 self.repo = repo
213 self.repo = repo
214 self.match = match.match(repo.root, '', [], inc, exc)
214 self.match = match.match(repo.root, '', [], inc, exc)
215 self.restrict = kwtools['hgcmd'] in restricted.split()
215 self.restrict = kwtools['hgcmd'] in restricted.split()
216 self.postcommit = False
216 self.postcommit = False
217
217
218 kwmaps = self.ui.configitems('keywordmaps')
218 kwmaps = self.ui.configitems('keywordmaps')
219 if kwmaps: # override default templates
219 if kwmaps: # override default templates
220 self.templates = dict(kwmaps)
220 self.templates = dict(kwmaps)
221 else:
221 else:
222 self.templates = _defaultkwmaps(self.ui)
222 self.templates = _defaultkwmaps(self.ui)
223
223
224 @util.propertycache
224 @util.propertycache
225 def escape(self):
225 def escape(self):
226 '''Returns bar-separated and escaped keywords.'''
226 '''Returns bar-separated and escaped keywords.'''
227 return '|'.join(map(re.escape, self.templates.keys()))
227 return '|'.join(map(re.escape, self.templates.keys()))
228
228
229 @util.propertycache
229 @util.propertycache
230 def rekw(self):
230 def rekw(self):
231 '''Returns regex for unexpanded keywords.'''
231 '''Returns regex for unexpanded keywords.'''
232 return re.compile(r'\$(%s)\$' % self.escape)
232 return re.compile(r'\$(%s)\$' % self.escape)
233
233
234 @util.propertycache
234 @util.propertycache
235 def rekwexp(self):
235 def rekwexp(self):
236 '''Returns regex for expanded keywords.'''
236 '''Returns regex for expanded keywords.'''
237 return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
237 return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
238
238
239 def substitute(self, data, path, ctx, subfunc):
239 def substitute(self, data, path, ctx, subfunc):
240 '''Replaces keywords in data with expanded template.'''
240 '''Replaces keywords in data with expanded template.'''
241 def kwsub(mobj):
241 def kwsub(mobj):
242 kw = mobj.group(1)
242 kw = mobj.group(1)
243 ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
243 ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
244 self.templates[kw], '', False)
244 self.templates[kw], '', False)
245 self.ui.pushbuffer()
245 self.ui.pushbuffer()
246 ct.show(ctx, root=self.repo.root, file=path)
246 ct.show(ctx, root=self.repo.root, file=path)
247 ekw = templatefilters.firstline(self.ui.popbuffer())
247 ekw = templatefilters.firstline(self.ui.popbuffer())
248 return '$%s: %s $' % (kw, ekw)
248 return '$%s: %s $' % (kw, ekw)
249 return subfunc(kwsub, data)
249 return subfunc(kwsub, data)
250
250
251 def linkctx(self, path, fileid):
251 def linkctx(self, path, fileid):
252 '''Similar to filelog.linkrev, but returns a changectx.'''
252 '''Similar to filelog.linkrev, but returns a changectx.'''
253 return self.repo.filectx(path, fileid=fileid).changectx()
253 return self.repo.filectx(path, fileid=fileid).changectx()
254
254
255 def expand(self, path, node, data):
255 def expand(self, path, node, data):
256 '''Returns data with keywords expanded.'''
256 '''Returns data with keywords expanded.'''
257 if not self.restrict and self.match(path) and not util.binary(data):
257 if not self.restrict and self.match(path) and not util.binary(data):
258 ctx = self.linkctx(path, node)
258 ctx = self.linkctx(path, node)
259 return self.substitute(data, path, ctx, self.rekw.sub)
259 return self.substitute(data, path, ctx, self.rekw.sub)
260 return data
260 return data
261
261
262 def iskwfile(self, cand, ctx):
262 def iskwfile(self, cand, ctx):
263 '''Returns subset of candidates which are configured for keyword
263 '''Returns subset of candidates which are configured for keyword
264 expansion but are not symbolic links.'''
264 expansion but are not symbolic links.'''
265 return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
265 return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
266
266
267 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
267 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
268 '''Overwrites selected files expanding/shrinking keywords.'''
268 '''Overwrites selected files expanding/shrinking keywords.'''
269 if self.restrict or lookup or self.postcommit: # exclude kw_copy
269 if self.restrict or lookup or self.postcommit: # exclude kw_copy
270 candidates = self.iskwfile(candidates, ctx)
270 candidates = self.iskwfile(candidates, ctx)
271 if not candidates:
271 if not candidates:
272 return
272 return
273 kwcmd = self.restrict and lookup # kwexpand/kwshrink
273 kwcmd = self.restrict and lookup # kwexpand/kwshrink
274 if self.restrict or expand and lookup:
274 if self.restrict or expand and lookup:
275 mf = ctx.manifest()
275 mf = ctx.manifest()
276 if self.restrict or rekw:
276 if self.restrict or rekw:
277 re_kw = self.rekw
277 re_kw = self.rekw
278 else:
278 else:
279 re_kw = self.rekwexp
279 re_kw = self.rekwexp
280 if expand:
280 if expand:
281 msg = _('overwriting %s expanding keywords\n')
281 msg = _('overwriting %s expanding keywords\n')
282 else:
282 else:
283 msg = _('overwriting %s shrinking keywords\n')
283 msg = _('overwriting %s shrinking keywords\n')
284 for f in candidates:
284 for f in candidates:
285 if self.restrict:
285 if self.restrict:
286 data = self.repo.file(f).read(mf[f])
286 data = self.repo.file(f).read(mf[f])
287 else:
287 else:
288 data = self.repo.wread(f)
288 data = self.repo.wread(f)
289 if util.binary(data):
289 if util.binary(data):
290 continue
290 continue
291 if expand:
291 if expand:
292 parents = ctx.parents()
292 parents = ctx.parents()
293 if lookup:
293 if lookup:
294 ctx = self.linkctx(f, mf[f])
294 ctx = self.linkctx(f, mf[f])
295 elif self.restrict and len(parents) > 1:
295 elif self.restrict and len(parents) > 1:
296 # merge commit
296 # merge commit
297 # in case of conflict f is in modified state during
297 # in case of conflict f is in modified state during
298 # merge, even if f does not differ from f in parent
298 # merge, even if f does not differ from f in parent
299 for p in parents:
299 for p in parents:
300 if f in p and not p[f].cmp(ctx[f]):
300 if f in p and not p[f].cmp(ctx[f]):
301 ctx = p[f].changectx()
301 ctx = p[f].changectx()
302 break
302 break
303 data, found = self.substitute(data, f, ctx, re_kw.subn)
303 data, found = self.substitute(data, f, ctx, re_kw.subn)
304 elif self.restrict:
304 elif self.restrict:
305 found = re_kw.search(data)
305 found = re_kw.search(data)
306 else:
306 else:
307 data, found = _shrinktext(data, re_kw.subn)
307 data, found = _shrinktext(data, re_kw.subn)
308 if found:
308 if found:
309 self.ui.note(msg % f)
309 self.ui.note(msg % f)
310 fp = self.repo.wvfs(f, "wb", atomictemp=True)
310 fp = self.repo.wvfs(f, "wb", atomictemp=True)
311 fp.write(data)
311 fp.write(data)
312 fp.close()
312 fp.close()
313 if kwcmd:
313 if kwcmd:
314 self.repo.dirstate.normal(f)
314 self.repo.dirstate.normal(f)
315 elif self.postcommit:
315 elif self.postcommit:
316 self.repo.dirstate.normallookup(f)
316 self.repo.dirstate.normallookup(f)
317
317
318 def shrink(self, fname, text):
318 def shrink(self, fname, text):
319 '''Returns text with all keyword substitutions removed.'''
319 '''Returns text with all keyword substitutions removed.'''
320 if self.match(fname) and not util.binary(text):
320 if self.match(fname) and not util.binary(text):
321 return _shrinktext(text, self.rekwexp.sub)
321 return _shrinktext(text, self.rekwexp.sub)
322 return text
322 return text
323
323
324 def shrinklines(self, fname, lines):
324 def shrinklines(self, fname, lines):
325 '''Returns lines with keyword substitutions removed.'''
325 '''Returns lines with keyword substitutions removed.'''
326 if self.match(fname):
326 if self.match(fname):
327 text = ''.join(lines)
327 text = ''.join(lines)
328 if not util.binary(text):
328 if not util.binary(text):
329 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
329 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
330 return lines
330 return lines
331
331
332 def wread(self, fname, data):
332 def wread(self, fname, data):
333 '''If in restricted mode returns data read from wdir with
333 '''If in restricted mode returns data read from wdir with
334 keyword substitutions removed.'''
334 keyword substitutions removed.'''
335 if self.restrict:
335 if self.restrict:
336 return self.shrink(fname, data)
336 return self.shrink(fname, data)
337 return data
337 return data
338
338
339 class kwfilelog(filelog.filelog):
339 class kwfilelog(filelog.filelog):
340 '''
340 '''
341 Subclass of filelog to hook into its read, add, cmp methods.
341 Subclass of filelog to hook into its read, add, cmp methods.
342 Keywords are "stored" unexpanded, and processed on reading.
342 Keywords are "stored" unexpanded, and processed on reading.
343 '''
343 '''
344 def __init__(self, opener, kwt, path):
344 def __init__(self, opener, kwt, path):
345 super(kwfilelog, self).__init__(opener, path)
345 super(kwfilelog, self).__init__(opener, path)
346 self.kwt = kwt
346 self.kwt = kwt
347 self.path = path
347 self.path = path
348
348
349 def read(self, node):
349 def read(self, node):
350 '''Expands keywords when reading filelog.'''
350 '''Expands keywords when reading filelog.'''
351 data = super(kwfilelog, self).read(node)
351 data = super(kwfilelog, self).read(node)
352 if self.renamed(node):
352 if self.renamed(node):
353 return data
353 return data
354 return self.kwt.expand(self.path, node, data)
354 return self.kwt.expand(self.path, node, data)
355
355
356 def add(self, text, meta, tr, link, p1=None, p2=None):
356 def add(self, text, meta, tr, link, p1=None, p2=None):
357 '''Removes keyword substitutions when adding to filelog.'''
357 '''Removes keyword substitutions when adding to filelog.'''
358 text = self.kwt.shrink(self.path, text)
358 text = self.kwt.shrink(self.path, text)
359 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
359 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
360
360
361 def cmp(self, node, text):
361 def cmp(self, node, text):
362 '''Removes keyword substitutions for comparison.'''
362 '''Removes keyword substitutions for comparison.'''
363 text = self.kwt.shrink(self.path, text)
363 text = self.kwt.shrink(self.path, text)
364 return super(kwfilelog, self).cmp(node, text)
364 return super(kwfilelog, self).cmp(node, text)
365
365
366 def _status(ui, repo, wctx, kwt, *pats, **opts):
366 def _status(ui, repo, wctx, kwt, *pats, **opts):
367 '''Bails out if [keyword] configuration is not active.
367 '''Bails out if [keyword] configuration is not active.
368 Returns status of working directory.'''
368 Returns status of working directory.'''
369 if kwt:
369 if kwt:
370 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
370 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
371 unknown=opts.get('unknown') or opts.get('all'))
371 unknown=opts.get('unknown') or opts.get('all'))
372 if ui.configitems('keyword'):
372 if ui.configitems('keyword'):
373 raise error.Abort(_('[keyword] patterns cannot match'))
373 raise error.Abort(_('[keyword] patterns cannot match'))
374 raise error.Abort(_('no [keyword] patterns configured'))
374 raise error.Abort(_('no [keyword] patterns configured'))
375
375
376 def _kwfwrite(ui, repo, expand, *pats, **opts):
376 def _kwfwrite(ui, repo, expand, *pats, **opts):
377 '''Selects files and passes them to kwtemplater.overwrite.'''
377 '''Selects files and passes them to kwtemplater.overwrite.'''
378 wctx = repo[None]
378 wctx = repo[None]
379 if len(wctx.parents()) > 1:
379 if len(wctx.parents()) > 1:
380 raise error.Abort(_('outstanding uncommitted merge'))
380 raise error.Abort(_('outstanding uncommitted merge'))
381 kwt = kwtools['templater']
381 kwt = kwtools['templater']
382 with repo.wlock():
382 with repo.wlock():
383 status = _status(ui, repo, wctx, kwt, *pats, **opts)
383 status = _status(ui, repo, wctx, kwt, *pats, **opts)
384 if status.modified or status.added or status.removed or status.deleted:
384 if status.modified or status.added or status.removed or status.deleted:
385 raise error.Abort(_('outstanding uncommitted changes'))
385 raise error.Abort(_('outstanding uncommitted changes'))
386 kwt.overwrite(wctx, status.clean, True, expand)
386 kwt.overwrite(wctx, status.clean, True, expand)
387
387
388 @command('kwdemo',
388 @command('kwdemo',
389 [('d', 'default', None, _('show default keyword template maps')),
389 [('d', 'default', None, _('show default keyword template maps')),
390 ('f', 'rcfile', '',
390 ('f', 'rcfile', '',
391 _('read maps from rcfile'), _('FILE'))],
391 _('read maps from rcfile'), _('FILE'))],
392 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
392 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
393 optionalrepo=True)
393 optionalrepo=True)
394 def demo(ui, repo, *args, **opts):
394 def demo(ui, repo, *args, **opts):
395 '''print [keywordmaps] configuration and an expansion example
395 '''print [keywordmaps] configuration and an expansion example
396
396
397 Show current, custom, or default keyword template maps and their
397 Show current, custom, or default keyword template maps and their
398 expansions.
398 expansions.
399
399
400 Extend the current configuration by specifying maps as arguments
400 Extend the current configuration by specifying maps as arguments
401 and using -f/--rcfile to source an external hgrc file.
401 and using -f/--rcfile to source an external hgrc file.
402
402
403 Use -d/--default to disable current configuration.
403 Use -d/--default to disable current configuration.
404
404
405 See :hg:`help templates` for information on templates and filters.
405 See :hg:`help templates` for information on templates and filters.
406 '''
406 '''
407 def demoitems(section, items):
407 def demoitems(section, items):
408 ui.write('[%s]\n' % section)
408 ui.write('[%s]\n' % section)
409 for k, v in sorted(items):
409 for k, v in sorted(items):
410 ui.write('%s = %s\n' % (k, v))
410 ui.write('%s = %s\n' % (k, v))
411
411
412 fn = 'demo.txt'
412 fn = 'demo.txt'
413 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
413 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
414 ui.note(_('creating temporary repository at %s\n') % tmpdir)
414 ui.note(_('creating temporary repository at %s\n') % tmpdir)
415 if repo is None:
415 if repo is None:
416 baseui = ui
416 baseui = ui
417 else:
417 else:
418 baseui = repo.baseui
418 baseui = repo.baseui
419 repo = localrepo.localrepository(baseui, tmpdir, True)
419 repo = localrepo.localrepository(baseui, tmpdir, True)
420 ui.setconfig('keyword', fn, '', 'keyword')
420 ui.setconfig('keyword', fn, '', 'keyword')
421 svn = ui.configbool('keywordset', 'svn')
421 svn = ui.configbool('keywordset', 'svn')
422 # explicitly set keywordset for demo output
422 # explicitly set keywordset for demo output
423 ui.setconfig('keywordset', 'svn', svn, 'keyword')
423 ui.setconfig('keywordset', 'svn', svn, 'keyword')
424
424
425 uikwmaps = ui.configitems('keywordmaps')
425 uikwmaps = ui.configitems('keywordmaps')
426 if args or opts.get('rcfile'):
426 if args or opts.get('rcfile'):
427 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
427 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
428 if uikwmaps:
428 if uikwmaps:
429 ui.status(_('\textending current template maps\n'))
429 ui.status(_('\textending current template maps\n'))
430 if opts.get('default') or not uikwmaps:
430 if opts.get('default') or not uikwmaps:
431 if svn:
431 if svn:
432 ui.status(_('\toverriding default svn keywordset\n'))
432 ui.status(_('\toverriding default svn keywordset\n'))
433 else:
433 else:
434 ui.status(_('\toverriding default cvs keywordset\n'))
434 ui.status(_('\toverriding default cvs keywordset\n'))
435 if opts.get('rcfile'):
435 if opts.get('rcfile'):
436 ui.readconfig(opts.get('rcfile'))
436 ui.readconfig(opts.get('rcfile'))
437 if args:
437 if args:
438 # simulate hgrc parsing
438 # simulate hgrc parsing
439 rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
439 rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
440 repo.vfs.write('hgrc', rcmaps)
440 repo.vfs.write('hgrc', rcmaps)
441 ui.readconfig(repo.join('hgrc'))
441 ui.readconfig(repo.join('hgrc'))
442 kwmaps = dict(ui.configitems('keywordmaps'))
442 kwmaps = dict(ui.configitems('keywordmaps'))
443 elif opts.get('default'):
443 elif opts.get('default'):
444 if svn:
444 if svn:
445 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
445 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
446 else:
446 else:
447 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
447 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
448 kwmaps = _defaultkwmaps(ui)
448 kwmaps = _defaultkwmaps(ui)
449 if uikwmaps:
449 if uikwmaps:
450 ui.status(_('\tdisabling current template maps\n'))
450 ui.status(_('\tdisabling current template maps\n'))
451 for k, v in kwmaps.iteritems():
451 for k, v in kwmaps.iteritems():
452 ui.setconfig('keywordmaps', k, v, 'keyword')
452 ui.setconfig('keywordmaps', k, v, 'keyword')
453 else:
453 else:
454 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
454 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
455 if uikwmaps:
455 if uikwmaps:
456 kwmaps = dict(uikwmaps)
456 kwmaps = dict(uikwmaps)
457 else:
457 else:
458 kwmaps = _defaultkwmaps(ui)
458 kwmaps = _defaultkwmaps(ui)
459
459
460 uisetup(ui)
460 uisetup(ui)
461 reposetup(ui, repo)
461 reposetup(ui, repo)
462 ui.write(('[extensions]\nkeyword =\n'))
462 ui.write(('[extensions]\nkeyword =\n'))
463 demoitems('keyword', ui.configitems('keyword'))
463 demoitems('keyword', ui.configitems('keyword'))
464 demoitems('keywordset', ui.configitems('keywordset'))
464 demoitems('keywordset', ui.configitems('keywordset'))
465 demoitems('keywordmaps', kwmaps.iteritems())
465 demoitems('keywordmaps', kwmaps.iteritems())
466 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
466 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
467 repo.wvfs.write(fn, keywords)
467 repo.wvfs.write(fn, keywords)
468 repo[None].add([fn])
468 repo[None].add([fn])
469 ui.note(_('\nkeywords written to %s:\n') % fn)
469 ui.note(_('\nkeywords written to %s:\n') % fn)
470 ui.note(keywords)
470 ui.note(keywords)
471 with repo.wlock():
471 with repo.wlock():
472 repo.dirstate.setbranch('demobranch')
472 repo.dirstate.setbranch('demobranch')
473 for name, cmd in ui.configitems('hooks'):
473 for name, cmd in ui.configitems('hooks'):
474 if name.split('.', 1)[0].find('commit') > -1:
474 if name.split('.', 1)[0].find('commit') > -1:
475 repo.ui.setconfig('hooks', name, '', 'keyword')
475 repo.ui.setconfig('hooks', name, '', 'keyword')
476 msg = _('hg keyword configuration and expansion example')
476 msg = _('hg keyword configuration and expansion example')
477 ui.note(("hg ci -m '%s'\n" % msg))
477 ui.note(("hg ci -m '%s'\n" % msg))
478 repo.commit(text=msg)
478 repo.commit(text=msg)
479 ui.status(_('\n\tkeywords expanded\n'))
479 ui.status(_('\n\tkeywords expanded\n'))
480 ui.write(repo.wread(fn))
480 ui.write(repo.wread(fn))
481 repo.wvfs.rmtree(repo.root)
481 repo.wvfs.rmtree(repo.root)
482
482
483 @command('kwexpand',
483 @command('kwexpand',
484 commands.walkopts,
484 commands.walkopts,
485 _('hg kwexpand [OPTION]... [FILE]...'),
485 _('hg kwexpand [OPTION]... [FILE]...'),
486 inferrepo=True)
486 inferrepo=True)
487 def expand(ui, repo, *pats, **opts):
487 def expand(ui, repo, *pats, **opts):
488 '''expand keywords in the working directory
488 '''expand keywords in the working directory
489
489
490 Run after (re)enabling keyword expansion.
490 Run after (re)enabling keyword expansion.
491
491
492 kwexpand refuses to run if given files contain local changes.
492 kwexpand refuses to run if given files contain local changes.
493 '''
493 '''
494 # 3rd argument sets expansion to True
494 # 3rd argument sets expansion to True
495 _kwfwrite(ui, repo, True, *pats, **opts)
495 _kwfwrite(ui, repo, True, *pats, **opts)
496
496
497 @command('kwfiles',
497 @command('kwfiles',
498 [('A', 'all', None, _('show keyword status flags of all files')),
498 [('A', 'all', None, _('show keyword status flags of all files')),
499 ('i', 'ignore', None, _('show files excluded from expansion')),
499 ('i', 'ignore', None, _('show files excluded from expansion')),
500 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
500 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
501 ] + commands.walkopts,
501 ] + commands.walkopts,
502 _('hg kwfiles [OPTION]... [FILE]...'),
502 _('hg kwfiles [OPTION]... [FILE]...'),
503 inferrepo=True)
503 inferrepo=True)
504 def files(ui, repo, *pats, **opts):
504 def files(ui, repo, *pats, **opts):
505 '''show files configured for keyword expansion
505 '''show files configured for keyword expansion
506
506
507 List which files in the working directory are matched by the
507 List which files in the working directory are matched by the
508 [keyword] configuration patterns.
508 [keyword] configuration patterns.
509
509
510 Useful to prevent inadvertent keyword expansion and to speed up
510 Useful to prevent inadvertent keyword expansion and to speed up
511 execution by including only files that are actual candidates for
511 execution by including only files that are actual candidates for
512 expansion.
512 expansion.
513
513
514 See :hg:`help keyword` on how to construct patterns both for
514 See :hg:`help keyword` on how to construct patterns both for
515 inclusion and exclusion of files.
515 inclusion and exclusion of files.
516
516
517 With -A/--all and -v/--verbose the codes used to show the status
517 With -A/--all and -v/--verbose the codes used to show the status
518 of files are::
518 of files are::
519
519
520 K = keyword expansion candidate
520 K = keyword expansion candidate
521 k = keyword expansion candidate (not tracked)
521 k = keyword expansion candidate (not tracked)
522 I = ignored
522 I = ignored
523 i = ignored (not tracked)
523 i = ignored (not tracked)
524 '''
524 '''
525 kwt = kwtools['templater']
525 kwt = kwtools['templater']
526 wctx = repo[None]
526 wctx = repo[None]
527 status = _status(ui, repo, wctx, kwt, *pats, **opts)
527 status = _status(ui, repo, wctx, kwt, *pats, **opts)
528 if pats:
528 if pats:
529 cwd = repo.getcwd()
529 cwd = repo.getcwd()
530 else:
530 else:
531 cwd = ''
531 cwd = ''
532 files = []
532 files = []
533 if not opts.get('unknown') or opts.get('all'):
533 if not opts.get('unknown') or opts.get('all'):
534 files = sorted(status.modified + status.added + status.clean)
534 files = sorted(status.modified + status.added + status.clean)
535 kwfiles = kwt.iskwfile(files, wctx)
535 kwfiles = kwt.iskwfile(files, wctx)
536 kwdeleted = kwt.iskwfile(status.deleted, wctx)
536 kwdeleted = kwt.iskwfile(status.deleted, wctx)
537 kwunknown = kwt.iskwfile(status.unknown, wctx)
537 kwunknown = kwt.iskwfile(status.unknown, wctx)
538 if not opts.get('ignore') or opts.get('all'):
538 if not opts.get('ignore') or opts.get('all'):
539 showfiles = kwfiles, kwdeleted, kwunknown
539 showfiles = kwfiles, kwdeleted, kwunknown
540 else:
540 else:
541 showfiles = [], [], []
541 showfiles = [], [], []
542 if opts.get('all') or opts.get('ignore'):
542 if opts.get('all') or opts.get('ignore'):
543 showfiles += ([f for f in files if f not in kwfiles],
543 showfiles += ([f for f in files if f not in kwfiles],
544 [f for f in status.unknown if f not in kwunknown])
544 [f for f in status.unknown if f not in kwunknown])
545 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
545 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
546 kwstates = zip(kwlabels, 'K!kIi', showfiles)
546 kwstates = zip(kwlabels, 'K!kIi', showfiles)
547 fm = ui.formatter('kwfiles', opts)
547 fm = ui.formatter('kwfiles', opts)
548 fmt = '%.0s%s\n'
548 fmt = '%.0s%s\n'
549 if opts.get('all') or ui.verbose:
549 if opts.get('all') or ui.verbose:
550 fmt = '%s %s\n'
550 fmt = '%s %s\n'
551 for kwstate, char, filenames in kwstates:
551 for kwstate, char, filenames in kwstates:
552 label = 'kwfiles.' + kwstate
552 label = 'kwfiles.' + kwstate
553 for f in filenames:
553 for f in filenames:
554 fm.startitem()
554 fm.startitem()
555 fm.write('kwstatus path', fmt, char,
555 fm.write('kwstatus path', fmt, char,
556 repo.pathto(f, cwd), label=label)
556 repo.pathto(f, cwd), label=label)
557 fm.end()
557 fm.end()
558
558
559 @command('kwshrink',
559 @command('kwshrink',
560 commands.walkopts,
560 commands.walkopts,
561 _('hg kwshrink [OPTION]... [FILE]...'),
561 _('hg kwshrink [OPTION]... [FILE]...'),
562 inferrepo=True)
562 inferrepo=True)
563 def shrink(ui, repo, *pats, **opts):
563 def shrink(ui, repo, *pats, **opts):
564 '''revert expanded keywords in the working directory
564 '''revert expanded keywords in the working directory
565
565
566 Must be run before changing/disabling active keywords.
566 Must be run before changing/disabling active keywords.
567
567
568 kwshrink refuses to run if given files contain local changes.
568 kwshrink refuses to run if given files contain local changes.
569 '''
569 '''
570 # 3rd argument sets expansion to False
570 # 3rd argument sets expansion to False
571 _kwfwrite(ui, repo, False, *pats, **opts)
571 _kwfwrite(ui, repo, False, *pats, **opts)
572
572
573
573
574 def uisetup(ui):
574 def uisetup(ui):
575 ''' Monkeypatches dispatch._parse to retrieve user command.'''
575 ''' Monkeypatches dispatch._parse to retrieve user command.'''
576
576
577 def kwdispatch_parse(orig, ui, args):
577 def kwdispatch_parse(orig, ui, args):
578 '''Monkeypatch dispatch._parse to obtain running hg command.'''
578 '''Monkeypatch dispatch._parse to obtain running hg command.'''
579 cmd, func, args, options, cmdoptions = orig(ui, args)
579 cmd, func, args, options, cmdoptions = orig(ui, args)
580 kwtools['hgcmd'] = cmd
580 kwtools['hgcmd'] = cmd
581 return cmd, func, args, options, cmdoptions
581 return cmd, func, args, options, cmdoptions
582
582
583 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
583 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
584
584
585 def reposetup(ui, repo):
585 def reposetup(ui, repo):
586 '''Sets up repo as kwrepo for keyword substitution.
586 '''Sets up repo as kwrepo for keyword substitution.
587 Overrides file method to return kwfilelog instead of filelog
587 Overrides file method to return kwfilelog instead of filelog
588 if file matches user configuration.
588 if file matches user configuration.
589 Wraps commit to overwrite configured files with updated
589 Wraps commit to overwrite configured files with updated
590 keyword substitutions.
590 keyword substitutions.
591 Monkeypatches patch and webcommands.'''
591 Monkeypatches patch and webcommands.'''
592
592
593 try:
593 try:
594 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
594 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
595 or '.hg' in util.splitpath(repo.root)
595 or '.hg' in util.splitpath(repo.root)
596 or repo._url.startswith('bundle:')):
596 or repo._url.startswith('bundle:')):
597 return
597 return
598 except AttributeError:
598 except AttributeError:
599 pass
599 pass
600
600
601 inc, exc = [], ['.hg*']
601 inc, exc = [], ['.hg*']
602 for pat, opt in ui.configitems('keyword'):
602 for pat, opt in ui.configitems('keyword'):
603 if opt != 'ignore':
603 if opt != 'ignore':
604 inc.append(pat)
604 inc.append(pat)
605 else:
605 else:
606 exc.append(pat)
606 exc.append(pat)
607 if not inc:
607 if not inc:
608 return
608 return
609
609
610 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
610 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
611
611
612 class kwrepo(repo.__class__):
612 class kwrepo(repo.__class__):
613 def file(self, f):
613 def file(self, f):
614 if f[0] == '/':
614 if f[0] == '/':
615 f = f[1:]
615 f = f[1:]
616 return kwfilelog(self.svfs, kwt, f)
616 return kwfilelog(self.svfs, kwt, f)
617
617
618 def wread(self, filename):
618 def wread(self, filename):
619 data = super(kwrepo, self).wread(filename)
619 data = super(kwrepo, self).wread(filename)
620 return kwt.wread(filename, data)
620 return kwt.wread(filename, data)
621
621
622 def commit(self, *args, **opts):
622 def commit(self, *args, **opts):
623 # use custom commitctx for user commands
623 # use custom commitctx for user commands
624 # other extensions can still wrap repo.commitctx directly
624 # other extensions can still wrap repo.commitctx directly
625 self.commitctx = self.kwcommitctx
625 self.commitctx = self.kwcommitctx
626 try:
626 try:
627 return super(kwrepo, self).commit(*args, **opts)
627 return super(kwrepo, self).commit(*args, **opts)
628 finally:
628 finally:
629 del self.commitctx
629 del self.commitctx
630
630
631 def kwcommitctx(self, ctx, error=False):
631 def kwcommitctx(self, ctx, error=False):
632 n = super(kwrepo, self).commitctx(ctx, error)
632 n = super(kwrepo, self).commitctx(ctx, error)
633 # no lock needed, only called from repo.commit() which already locks
633 # no lock needed, only called from repo.commit() which already locks
634 if not kwt.postcommit:
634 if not kwt.postcommit:
635 restrict = kwt.restrict
635 restrict = kwt.restrict
636 kwt.restrict = True
636 kwt.restrict = True
637 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
637 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
638 False, True)
638 False, True)
639 kwt.restrict = restrict
639 kwt.restrict = restrict
640 return n
640 return n
641
641
642 def rollback(self, dryrun=False, force=False):
642 def rollback(self, dryrun=False, force=False):
643 wlock = self.wlock()
643 wlock = self.wlock()
644 origrestrict = kwt.restrict
644 origrestrict = kwt.restrict
645 try:
645 try:
646 if not dryrun:
646 if not dryrun:
647 changed = self['.'].files()
647 changed = self['.'].files()
648 ret = super(kwrepo, self).rollback(dryrun, force)
648 ret = super(kwrepo, self).rollback(dryrun, force)
649 if not dryrun:
649 if not dryrun:
650 ctx = self['.']
650 ctx = self['.']
651 modified, added = _preselect(ctx.status(), changed)
651 modified, added = _preselect(ctx.status(), changed)
652 kwt.restrict = False
652 kwt.restrict = False
653 kwt.overwrite(ctx, modified, True, True)
653 kwt.overwrite(ctx, modified, True, True)
654 kwt.overwrite(ctx, added, True, False)
654 kwt.overwrite(ctx, added, True, False)
655 return ret
655 return ret
656 finally:
656 finally:
657 kwt.restrict = origrestrict
657 kwt.restrict = origrestrict
658 wlock.release()
658 wlock.release()
659
659
660 # monkeypatches
660 # monkeypatches
661 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
661 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
662 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
662 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
663 rejects or conflicts due to expanded keywords in working dir.'''
663 rejects or conflicts due to expanded keywords in working dir.'''
664 orig(self, ui, gp, backend, store, eolmode)
664 orig(self, ui, gp, backend, store, eolmode)
665 # shrink keywords read from working dir
665 # shrink keywords read from working dir
666 self.lines = kwt.shrinklines(self.fname, self.lines)
666 self.lines = kwt.shrinklines(self.fname, self.lines)
667
667
668 def kwdiff(orig, *args, **kwargs):
668 def kwdiff(orig, *args, **kwargs):
669 '''Monkeypatch patch.diff to avoid expansion.'''
669 '''Monkeypatch patch.diff to avoid expansion.'''
670 kwt.restrict = True
670 kwt.restrict = True
671 return orig(*args, **kwargs)
671 return orig(*args, **kwargs)
672
672
673 def kwweb_skip(orig, web, req, tmpl):
673 def kwweb_skip(orig, web, req, tmpl):
674 '''Wraps webcommands.x turning off keyword expansion.'''
674 '''Wraps webcommands.x turning off keyword expansion.'''
675 kwt.match = util.never
675 kwt.match = util.never
676 return orig(web, req, tmpl)
676 return orig(web, req, tmpl)
677
677
678 def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
678 def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
679 '''Wraps cmdutil.amend expanding keywords after amend.'''
679 '''Wraps cmdutil.amend expanding keywords after amend.'''
680 with repo.wlock():
680 with repo.wlock():
681 kwt.postcommit = True
681 kwt.postcommit = True
682 newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
682 newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
683 if newid != old.node():
683 if newid != old.node():
684 ctx = repo[newid]
684 ctx = repo[newid]
685 kwt.restrict = True
685 kwt.restrict = True
686 kwt.overwrite(ctx, ctx.files(), False, True)
686 kwt.overwrite(ctx, ctx.files(), False, True)
687 kwt.restrict = False
687 kwt.restrict = False
688 return newid
688 return newid
689
689
690 def kw_copy(orig, ui, repo, pats, opts, rename=False):
690 def kw_copy(orig, ui, repo, pats, opts, rename=False):
691 '''Wraps cmdutil.copy so that copy/rename destinations do not
691 '''Wraps cmdutil.copy so that copy/rename destinations do not
692 contain expanded keywords.
692 contain expanded keywords.
693 Note that the source of a regular file destination may also be a
693 Note that the source of a regular file destination may also be a
694 symlink:
694 symlink:
695 hg cp sym x -> x is symlink
695 hg cp sym x -> x is symlink
696 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
696 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
697 For the latter we have to follow the symlink to find out whether its
697 For the latter we have to follow the symlink to find out whether its
698 target is configured for expansion and we therefore must unexpand the
698 target is configured for expansion and we therefore must unexpand the
699 keywords in the destination.'''
699 keywords in the destination.'''
700 with repo.wlock():
700 with repo.wlock():
701 orig(ui, repo, pats, opts, rename)
701 orig(ui, repo, pats, opts, rename)
702 if opts.get('dry_run'):
702 if opts.get('dry_run'):
703 return
703 return
704 wctx = repo[None]
704 wctx = repo[None]
705 cwd = repo.getcwd()
705 cwd = repo.getcwd()
706
706
707 def haskwsource(dest):
707 def haskwsource(dest):
708 '''Returns true if dest is a regular file and configured for
708 '''Returns true if dest is a regular file and configured for
709 expansion or a symlink which points to a file configured for
709 expansion or a symlink which points to a file configured for
710 expansion. '''
710 expansion. '''
711 source = repo.dirstate.copied(dest)
711 source = repo.dirstate.copied(dest)
712 if 'l' in wctx.flags(source):
712 if 'l' in wctx.flags(source):
713 source = pathutil.canonpath(repo.root, cwd,
713 source = pathutil.canonpath(repo.root, cwd,
714 os.path.realpath(source))
714 os.path.realpath(source))
715 return kwt.match(source)
715 return kwt.match(source)
716
716
717 candidates = [f for f in repo.dirstate.copies() if
717 candidates = [f for f in repo.dirstate.copies() if
718 'l' not in wctx.flags(f) and haskwsource(f)]
718 'l' not in wctx.flags(f) and haskwsource(f)]
719 kwt.overwrite(wctx, candidates, False, False)
719 kwt.overwrite(wctx, candidates, False, False)
720
720
721 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
721 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
722 '''Wraps record.dorecord expanding keywords after recording.'''
722 '''Wraps record.dorecord expanding keywords after recording.'''
723 with repo.wlock():
723 with repo.wlock():
724 # record returns 0 even when nothing has changed
724 # record returns 0 even when nothing has changed
725 # therefore compare nodes before and after
725 # therefore compare nodes before and after
726 kwt.postcommit = True
726 kwt.postcommit = True
727 ctx = repo['.']
727 ctx = repo['.']
728 wstatus = ctx.status()
728 wstatus = ctx.status()
729 ret = orig(ui, repo, commitfunc, *pats, **opts)
729 ret = orig(ui, repo, commitfunc, *pats, **opts)
730 recctx = repo['.']
730 recctx = repo['.']
731 if ctx != recctx:
731 if ctx != recctx:
732 modified, added = _preselect(wstatus, recctx.files())
732 modified, added = _preselect(wstatus, recctx.files())
733 kwt.restrict = False
733 kwt.restrict = False
734 kwt.overwrite(recctx, modified, False, True)
734 kwt.overwrite(recctx, modified, False, True)
735 kwt.overwrite(recctx, added, False, True, True)
735 kwt.overwrite(recctx, added, False, True, True)
736 kwt.restrict = True
736 kwt.restrict = True
737 return ret
737 return ret
738
738
739 def kwfilectx_cmp(orig, self, fctx):
739 def kwfilectx_cmp(orig, self, fctx):
740 # keyword affects data size, comparing wdir and filelog size does
740 # keyword affects data size, comparing wdir and filelog size does
741 # not make sense
741 # not make sense
742 if (fctx._filenode is None and
742 if (fctx._filenode is None and
743 (self._repo._encodefilterpats or
743 (self._repo._encodefilterpats or
744 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
744 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
745 self.size() - 4 == fctx.size()) or
745 self.size() - 4 == fctx.size()) or
746 self.size() == fctx.size()):
746 self.size() == fctx.size()):
747 return self._filelog.cmp(self._filenode, fctx.data())
747 return self._filelog.cmp(self._filenode, fctx.data())
748 return True
748 return True
749
749
750 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
750 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
751 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
751 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
752 extensions.wrapfunction(patch, 'diff', kwdiff)
752 extensions.wrapfunction(patch, 'diff', kwdiff)
753 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
753 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
754 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
754 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
755 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
755 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
756 for c in 'annotate changeset rev filediff diff'.split():
756 for c in 'annotate changeset rev filediff diff'.split():
757 extensions.wrapfunction(webcommands, c, kwweb_skip)
757 extensions.wrapfunction(webcommands, c, kwweb_skip)
758 repo.__class__ = kwrepo
758 repo.__class__ = kwrepo
@@ -1,140 +1,140 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''track large binary files
9 '''track large binary files
10
10
11 Large binary files tend to be not very compressible, not very
11 Large binary files tend to be not very compressible, not very
12 diffable, and not at all mergeable. Such files are not handled
12 diffable, and not at all mergeable. Such files are not handled
13 efficiently by Mercurial's storage format (revlog), which is based on
13 efficiently by Mercurial's storage format (revlog), which is based on
14 compressed binary deltas; storing large binary files as regular
14 compressed binary deltas; storing large binary files as regular
15 Mercurial files wastes bandwidth and disk space and increases
15 Mercurial files wastes bandwidth and disk space and increases
16 Mercurial's memory usage. The largefiles extension addresses these
16 Mercurial's memory usage. The largefiles extension addresses these
17 problems by adding a centralized client-server layer on top of
17 problems by adding a centralized client-server layer on top of
18 Mercurial: largefiles live in a *central store* out on the network
18 Mercurial: largefiles live in a *central store* out on the network
19 somewhere, and you only fetch the revisions that you need when you
19 somewhere, and you only fetch the revisions that you need when you
20 need them.
20 need them.
21
21
22 largefiles works by maintaining a "standin file" in .hglf/ for each
22 largefiles works by maintaining a "standin file" in .hglf/ for each
23 largefile. The standins are small (41 bytes: an SHA-1 hash plus
23 largefile. The standins are small (41 bytes: an SHA-1 hash plus
24 newline) and are tracked by Mercurial. Largefile revisions are
24 newline) and are tracked by Mercurial. Largefile revisions are
25 identified by the SHA-1 hash of their contents, which is written to
25 identified by the SHA-1 hash of their contents, which is written to
26 the standin. largefiles uses that revision ID to get/put largefile
26 the standin. largefiles uses that revision ID to get/put largefile
27 revisions from/to the central store. This saves both disk space and
27 revisions from/to the central store. This saves both disk space and
28 bandwidth, since you don't need to retrieve all historical revisions
28 bandwidth, since you don't need to retrieve all historical revisions
29 of large files when you clone or pull.
29 of large files when you clone or pull.
30
30
31 To start a new repository or add new large binary files, just add
31 To start a new repository or add new large binary files, just add
32 --large to your :hg:`add` command. For example::
32 --large to your :hg:`add` command. For example::
33
33
34 $ dd if=/dev/urandom of=randomdata count=2000
34 $ dd if=/dev/urandom of=randomdata count=2000
35 $ hg add --large randomdata
35 $ hg add --large randomdata
36 $ hg commit -m "add randomdata as a largefile"
36 $ hg commit -m "add randomdata as a largefile"
37
37
38 When you push a changeset that adds/modifies largefiles to a remote
38 When you push a changeset that adds/modifies largefiles to a remote
39 repository, its largefile revisions will be uploaded along with it.
39 repository, its largefile revisions will be uploaded along with it.
40 Note that the remote Mercurial must also have the largefiles extension
40 Note that the remote Mercurial must also have the largefiles extension
41 enabled for this to work.
41 enabled for this to work.
42
42
43 When you pull a changeset that affects largefiles from a remote
43 When you pull a changeset that affects largefiles from a remote
44 repository, the largefiles for the changeset will by default not be
44 repository, the largefiles for the changeset will by default not be
45 pulled down. However, when you update to such a revision, any
45 pulled down. However, when you update to such a revision, any
46 largefiles needed by that revision are downloaded and cached (if
46 largefiles needed by that revision are downloaded and cached (if
47 they have never been downloaded before). One way to pull largefiles
47 they have never been downloaded before). One way to pull largefiles
48 when pulling is thus to use --update, which will update your working
48 when pulling is thus to use --update, which will update your working
49 copy to the latest pulled revision (and thereby downloading any new
49 copy to the latest pulled revision (and thereby downloading any new
50 largefiles).
50 largefiles).
51
51
52 If you want to pull largefiles you don't need for update yet, then
52 If you want to pull largefiles you don't need for update yet, then
53 you can use pull with the `--lfrev` option or the :hg:`lfpull` command.
53 you can use pull with the `--lfrev` option or the :hg:`lfpull` command.
54
54
55 If you know you are pulling from a non-default location and want to
55 If you know you are pulling from a non-default location and want to
56 download all the largefiles that correspond to the new changesets at
56 download all the largefiles that correspond to the new changesets at
57 the same time, then you can pull with `--lfrev "pulled()"`.
57 the same time, then you can pull with `--lfrev "pulled()"`.
58
58
59 If you just want to ensure that you will have the largefiles needed to
59 If you just want to ensure that you will have the largefiles needed to
60 merge or rebase with new heads that you are pulling, then you can pull
60 merge or rebase with new heads that you are pulling, then you can pull
61 with `--lfrev "head(pulled())"` flag to pre-emptively download any largefiles
61 with `--lfrev "head(pulled())"` flag to pre-emptively download any largefiles
62 that are new in the heads you are pulling.
62 that are new in the heads you are pulling.
63
63
64 Keep in mind that network access may now be required to update to
64 Keep in mind that network access may now be required to update to
65 changesets that you have not previously updated to. The nature of the
65 changesets that you have not previously updated to. The nature of the
66 largefiles extension means that updating is no longer guaranteed to
66 largefiles extension means that updating is no longer guaranteed to
67 be a local-only operation.
67 be a local-only operation.
68
68
69 If you already have large files tracked by Mercurial without the
69 If you already have large files tracked by Mercurial without the
70 largefiles extension, you will need to convert your repository in
70 largefiles extension, you will need to convert your repository in
71 order to benefit from largefiles. This is done with the
71 order to benefit from largefiles. This is done with the
72 :hg:`lfconvert` command::
72 :hg:`lfconvert` command::
73
73
74 $ hg lfconvert --size 10 oldrepo newrepo
74 $ hg lfconvert --size 10 oldrepo newrepo
75
75
76 In repositories that already have largefiles in them, any new file
76 In repositories that already have largefiles in them, any new file
77 over 10MB will automatically be added as a largefile. To change this
77 over 10MB will automatically be added as a largefile. To change this
78 threshold, set ``largefiles.minsize`` in your Mercurial config file
78 threshold, set ``largefiles.minsize`` in your Mercurial config file
79 to the minimum size in megabytes to track as a largefile, or use the
79 to the minimum size in megabytes to track as a largefile, or use the
80 --lfsize option to the add command (also in megabytes)::
80 --lfsize option to the add command (also in megabytes)::
81
81
82 [largefiles]
82 [largefiles]
83 minsize = 2
83 minsize = 2
84
84
85 $ hg add --lfsize 2
85 $ hg add --lfsize 2
86
86
87 The ``largefiles.patterns`` config option allows you to specify a list
87 The ``largefiles.patterns`` config option allows you to specify a list
88 of filename patterns (see :hg:`help patterns`) that should always be
88 of filename patterns (see :hg:`help patterns`) that should always be
89 tracked as largefiles::
89 tracked as largefiles::
90
90
91 [largefiles]
91 [largefiles]
92 patterns =
92 patterns =
93 *.jpg
93 *.jpg
94 re:.*\.(png|bmp)$
94 re:.*\.(png|bmp)$
95 library.zip
95 library.zip
96 content/audio/*
96 content/audio/*
97
97
98 Files that match one of these patterns will be added as largefiles
98 Files that match one of these patterns will be added as largefiles
99 regardless of their size.
99 regardless of their size.
100
100
101 The ``largefiles.minsize`` and ``largefiles.patterns`` config options
101 The ``largefiles.minsize`` and ``largefiles.patterns`` config options
102 will be ignored for any repositories not already containing a
102 will be ignored for any repositories not already containing a
103 largefile. To add the first largefile to a repository, you must
103 largefile. To add the first largefile to a repository, you must
104 explicitly do so with the --large flag passed to the :hg:`add`
104 explicitly do so with the --large flag passed to the :hg:`add`
105 command.
105 command.
106 '''
106 '''
107 from __future__ import absolute_import
107 from __future__ import absolute_import
108
108
109 from mercurial import (
109 from mercurial import (
110 hg,
110 hg,
111 localrepo,
111 localrepo,
112 )
112 )
113
113
114 from . import (
114 from . import (
115 lfcommands,
115 lfcommands,
116 overrides,
116 overrides,
117 proto,
117 proto,
118 reposetup,
118 reposetup,
119 uisetup as uisetupmod,
119 uisetup as uisetupmod,
120 )
120 )
121
121
122 # Note for extension authors: ONLY specify testedwith = 'internal' for
122 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
123 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
124 # be specifying the version(s) of Mercurial they are tested with, or
124 # be specifying the version(s) of Mercurial they are tested with, or
125 # leave the attribute unspecified.
125 # leave the attribute unspecified.
126 testedwith = 'internal'
126 testedwith = 'ships-with-hg-core'
127
127
128 reposetup = reposetup.reposetup
128 reposetup = reposetup.reposetup
129
129
130 def featuresetup(ui, supported):
130 def featuresetup(ui, supported):
131 # don't die on seeing a repo with the largefiles requirement
131 # don't die on seeing a repo with the largefiles requirement
132 supported |= set(['largefiles'])
132 supported |= set(['largefiles'])
133
133
134 def uisetup(ui):
134 def uisetup(ui):
135 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
135 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
136 hg.wirepeersetupfuncs.append(proto.wirereposetup)
136 hg.wirepeersetupfuncs.append(proto.wirereposetup)
137 uisetupmod.uisetup(ui)
137 uisetupmod.uisetup(ui)
138
138
139 cmdtable = lfcommands.cmdtable
139 cmdtable = lfcommands.cmdtable
140 revsetpredicate = overrides.revsetpredicate
140 revsetpredicate = overrides.revsetpredicate
@@ -1,129 +1,129 b''
1 # logtoprocess.py - send ui.log() data to a subprocess
1 # logtoprocess.py - send ui.log() data to a subprocess
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Send ui.log() data to a subprocess (EXPERIMENTAL)
7 """Send ui.log() data to a subprocess (EXPERIMENTAL)
8
8
9 This extension lets you specify a shell command per ui.log() event,
9 This extension lets you specify a shell command per ui.log() event,
10 sending all remaining arguments to as environment variables to that command.
10 sending all remaining arguments to as environment variables to that command.
11
11
12 Each positional argument to the method results in a `MSG[N]` key in the
12 Each positional argument to the method results in a `MSG[N]` key in the
13 environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument
13 environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument
14 is set as a `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and
14 is set as a `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and
15 prefixed with `OPT_`). The original event name is passed in the `EVENT`
15 prefixed with `OPT_`). The original event name is passed in the `EVENT`
16 environment variable, and the process ID of mercurial is given in `HGPID`.
16 environment variable, and the process ID of mercurial is given in `HGPID`.
17
17
18 So given a call `ui.log('foo', 'bar', 'baz', spam='eggs'), a script configured
18 So given a call `ui.log('foo', 'bar', 'baz', spam='eggs'), a script configured
19 for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and
19 for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and
20 `OPT_SPAM=eggs`.
20 `OPT_SPAM=eggs`.
21
21
22 Scripts are configured in the `[logtoprocess]` section, each key an event name.
22 Scripts are configured in the `[logtoprocess]` section, each key an event name.
23 For example::
23 For example::
24
24
25 [logtoprocess]
25 [logtoprocess]
26 commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log
26 commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log
27
27
28 would log the warning message and traceback of any failed command dispatch.
28 would log the warning message and traceback of any failed command dispatch.
29
29
30 Scripts are run asychronously as detached daemon processes; mercurial will
30 Scripts are run asychronously as detached daemon processes; mercurial will
31 not ensure that they exit cleanly.
31 not ensure that they exit cleanly.
32
32
33 """
33 """
34
34
35 from __future__ import absolute_import
35 from __future__ import absolute_import
36
36
37 import itertools
37 import itertools
38 import os
38 import os
39 import platform
39 import platform
40 import subprocess
40 import subprocess
41 import sys
41 import sys
42
42
43 # Note for extension authors: ONLY specify testedwith = 'internal' for
43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 # be specifying the version(s) of Mercurial they are tested with, or
45 # be specifying the version(s) of Mercurial they are tested with, or
46 # leave the attribute unspecified.
46 # leave the attribute unspecified.
47 testedwith = 'internal'
47 testedwith = 'ships-with-hg-core'
48
48
49 def uisetup(ui):
49 def uisetup(ui):
50 if platform.system() == 'Windows':
50 if platform.system() == 'Windows':
51 # no fork on Windows, but we can create a detached process
51 # no fork on Windows, but we can create a detached process
52 # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
52 # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
53 # No stdlib constant exists for this value
53 # No stdlib constant exists for this value
54 DETACHED_PROCESS = 0x00000008
54 DETACHED_PROCESS = 0x00000008
55 _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
55 _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
56
56
57 def runshellcommand(script, env):
57 def runshellcommand(script, env):
58 # we can't use close_fds *and* redirect stdin. I'm not sure that we
58 # we can't use close_fds *and* redirect stdin. I'm not sure that we
59 # need to because the detached process has no console connection.
59 # need to because the detached process has no console connection.
60 subprocess.Popen(
60 subprocess.Popen(
61 script, shell=True, env=env, close_fds=True,
61 script, shell=True, env=env, close_fds=True,
62 creationflags=_creationflags)
62 creationflags=_creationflags)
63 else:
63 else:
64 def runshellcommand(script, env):
64 def runshellcommand(script, env):
65 # double-fork to completely detach from the parent process
65 # double-fork to completely detach from the parent process
66 # based on http://code.activestate.com/recipes/278731
66 # based on http://code.activestate.com/recipes/278731
67 pid = os.fork()
67 pid = os.fork()
68 if pid:
68 if pid:
69 # parent
69 # parent
70 return
70 return
71 # subprocess.Popen() forks again, all we need to add is
71 # subprocess.Popen() forks again, all we need to add is
72 # flag the new process as a new session.
72 # flag the new process as a new session.
73 if sys.version_info < (3, 2):
73 if sys.version_info < (3, 2):
74 newsession = {'preexec_fn': os.setsid}
74 newsession = {'preexec_fn': os.setsid}
75 else:
75 else:
76 newsession = {'start_new_session': True}
76 newsession = {'start_new_session': True}
77 try:
77 try:
78 # connect stdin to devnull to make sure the subprocess can't
78 # connect stdin to devnull to make sure the subprocess can't
79 # muck up that stream for mercurial.
79 # muck up that stream for mercurial.
80 subprocess.Popen(
80 subprocess.Popen(
81 script, shell=True, stdin=open(os.devnull, 'r'), env=env,
81 script, shell=True, stdin=open(os.devnull, 'r'), env=env,
82 close_fds=True, **newsession)
82 close_fds=True, **newsession)
83 finally:
83 finally:
84 # mission accomplished, this child needs to exit and not
84 # mission accomplished, this child needs to exit and not
85 # continue the hg process here.
85 # continue the hg process here.
86 os._exit(0)
86 os._exit(0)
87
87
88 class logtoprocessui(ui.__class__):
88 class logtoprocessui(ui.__class__):
89 def log(self, event, *msg, **opts):
89 def log(self, event, *msg, **opts):
90 """Map log events to external commands
90 """Map log events to external commands
91
91
92 Arguments are passed on as environment variables.
92 Arguments are passed on as environment variables.
93
93
94 """
94 """
95 script = self.config('logtoprocess', event)
95 script = self.config('logtoprocess', event)
96 if script:
96 if script:
97 if msg:
97 if msg:
98 # try to format the log message given the remaining
98 # try to format the log message given the remaining
99 # arguments
99 # arguments
100 try:
100 try:
101 # Python string formatting with % either uses a
101 # Python string formatting with % either uses a
102 # dictionary *or* tuple, but not both. If we have
102 # dictionary *or* tuple, but not both. If we have
103 # keyword options, assume we need a mapping.
103 # keyword options, assume we need a mapping.
104 formatted = msg[0] % (opts or msg[1:])
104 formatted = msg[0] % (opts or msg[1:])
105 except (TypeError, KeyError):
105 except (TypeError, KeyError):
106 # Failed to apply the arguments, ignore
106 # Failed to apply the arguments, ignore
107 formatted = msg[0]
107 formatted = msg[0]
108 messages = (formatted,) + msg[1:]
108 messages = (formatted,) + msg[1:]
109 else:
109 else:
110 messages = msg
110 messages = msg
111 # positional arguments are listed as MSG[N] keys in the
111 # positional arguments are listed as MSG[N] keys in the
112 # environment
112 # environment
113 msgpairs = (
113 msgpairs = (
114 ('MSG{0:d}'.format(i), str(m))
114 ('MSG{0:d}'.format(i), str(m))
115 for i, m in enumerate(messages, 1))
115 for i, m in enumerate(messages, 1))
116 # keyword arguments get prefixed with OPT_ and uppercased
116 # keyword arguments get prefixed with OPT_ and uppercased
117 optpairs = (
117 optpairs = (
118 ('OPT_{0}'.format(key.upper()), str(value))
118 ('OPT_{0}'.format(key.upper()), str(value))
119 for key, value in opts.iteritems())
119 for key, value in opts.iteritems())
120 env = dict(itertools.chain(os.environ.items(),
120 env = dict(itertools.chain(os.environ.items(),
121 msgpairs, optpairs),
121 msgpairs, optpairs),
122 EVENT=event, HGPID=str(os.getpid()))
122 EVENT=event, HGPID=str(os.getpid()))
123 # Connect stdin to /dev/null to prevent child processes messing
123 # Connect stdin to /dev/null to prevent child processes messing
124 # with mercurial's stdin.
124 # with mercurial's stdin.
125 runshellcommand(script, env)
125 runshellcommand(script, env)
126 return super(logtoprocessui, self).log(event, *msg, **opts)
126 return super(logtoprocessui, self).log(event, *msg, **opts)
127
127
128 # Replace the class for this instance and all clones created from it:
128 # Replace the class for this instance and all clones created from it:
129 ui.__class__ = logtoprocessui
129 ui.__class__ = logtoprocessui
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now