pathutil: tease out a new library to break an import cycle from canonpath use
Augie Fackler
r20033:f9628707 default
@@ -0,0 +1,144 b''
1 import os, errno, stat
2
3 import util
4 from i18n import _
5
6 class pathauditor(object):
7 '''ensure that a filesystem path contains no banned components.
8 The following properties of a path are checked:
9
10 - ends with a directory separator
11 - under top-level .hg
12 - starts at the root of a Windows drive
13 - contains ".."
14 - traverses a symlink (e.g. a/symlink_here/b)
15 - inside a nested repository (a callback can be used to approve
16 some nested repositories, e.g., subrepositories)
17 '''
18
19 def __init__(self, root, callback=None):
20 self.audited = set()
21 self.auditeddir = set()
22 self.root = root
23 self.callback = callback
24 if os.path.lexists(root) and not util.checkcase(root):
25 self.normcase = util.normcase
26 else:
27 self.normcase = lambda x: x
28
29 def __call__(self, path):
30 '''Check the relative path.
31 path may contain a pattern (e.g. foodir/**.txt)'''
32
33 path = util.localpath(path)
34 normpath = self.normcase(path)
35 if normpath in self.audited:
36 return
37 # AIX ignores "/" at end of path, others raise EISDIR.
38 if util.endswithsep(path):
39 raise util.Abort(_("path ends in directory separator: %s") % path)
40 parts = util.splitpath(path)
41 if (os.path.splitdrive(path)[0]
42 or parts[0].lower() in ('.hg', '.hg.', '')
43 or os.pardir in parts):
44 raise util.Abort(_("path contains illegal component: %s") % path)
45 if '.hg' in path.lower():
46 lparts = [p.lower() for p in parts]
47 for p in '.hg', '.hg.':
48 if p in lparts[1:]:
49 pos = lparts.index(p)
50 base = os.path.join(*parts[:pos])
51 raise util.Abort(_("path '%s' is inside nested repo %r")
52 % (path, base))
53
54 normparts = util.splitpath(normpath)
55 assert len(parts) == len(normparts)
56
57 parts.pop()
58 normparts.pop()
59 prefixes = []
60 while parts:
61 prefix = os.sep.join(parts)
62 normprefix = os.sep.join(normparts)
63 if normprefix in self.auditeddir:
64 break
65 curpath = os.path.join(self.root, prefix)
66 try:
67 st = os.lstat(curpath)
68 except OSError, err:
69 # EINVAL can be raised as invalid path syntax under win32.
70 # These errors must be ignored so that patterns can still be checked.
71 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
72 raise
73 else:
74 if stat.S_ISLNK(st.st_mode):
75 raise util.Abort(
76 _('path %r traverses symbolic link %r')
77 % (path, prefix))
78 elif (stat.S_ISDIR(st.st_mode) and
79 os.path.isdir(os.path.join(curpath, '.hg'))):
80 if not self.callback or not self.callback(curpath):
81 raise util.Abort(_("path '%s' is inside nested "
82 "repo %r")
83 % (path, prefix))
84 prefixes.append(normprefix)
85 parts.pop()
86 normparts.pop()
87
88 self.audited.add(normpath)
89 # only add prefixes to the cache after checking everything: we don't
90 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
91 self.auditeddir.update(prefixes)
92
93 def check(self, path):
94 try:
95 self(path)
96 return True
97 except (OSError, util.Abort):
98 return False
99
100 def canonpath(root, cwd, myname, auditor=None):
101 '''return the canonical path of myname, given cwd and root'''
102 if util.endswithsep(root):
103 rootsep = root
104 else:
105 rootsep = root + os.sep
106 name = myname
107 if not os.path.isabs(name):
108 name = os.path.join(root, cwd, name)
109 name = os.path.normpath(name)
110 if auditor is None:
111 auditor = pathauditor(root)
112 if name != rootsep and name.startswith(rootsep):
113 name = name[len(rootsep):]
114 auditor(name)
115 return util.pconvert(name)
116 elif name == root:
117 return ''
118 else:
119 # Determine whether `name' is in the hierarchy at or beneath `root',
120 # by iterating name=dirname(name) until that causes no change (can't
121 # check name == '/', because that doesn't work on windows). The list
122 # `rel' holds the reversed list of components making up the relative
123 # file name we want.
124 rel = []
125 while True:
126 try:
127 s = util.samefile(name, root)
128 except OSError:
129 s = False
130 if s:
131 if not rel:
132 # name was actually the same as root (maybe a symlink)
133 return ''
134 rel.reverse()
135 name = os.path.join(*rel)
136 auditor(name)
137 return util.pconvert(name)
138 dirname, basename = util.split(name)
139 rel.append(basename)
140 if dirname == name:
141 break
142 name = dirname
143
144 raise util.Abort(_("%s not under root '%s'") % (myname, root))
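
For orientation, here is a minimal usage sketch of the new module (illustrative only, not part of the change; it assumes a Mercurial tree of this vintage on sys.path and a hypothetical repository root at /path/to/repo):

    from mercurial import pathutil, util

    # audit individual working-directory paths against a repository root
    auditor = pathutil.pathauditor('/path/to/repo')
    auditor('src/module.py')         # passes silently
    print auditor.check('../etc')    # False: path contains ".."

    # resolve a cwd-relative name into a root-relative canonical path
    try:
        print pathutil.canonpath('/path/to/repo', 'src', 'module.py')
        # -> 'src/module.py'
    except util.Abort, inst:
        print 'rejected: %s' % inst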
@@ -1,732 +1,732 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2012 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a Distributed SCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56
57 57 The more specific your filename patterns are, the less speed you
58 58 lose in huge repositories.
59 59
60 60 For [keywordmaps] template mapping and expansion demonstration and
61 61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 62 available templates and filters.
63 63
64 64 Three additional date template filters are provided:
65 65
66 66 :``utcdate``: "2006/09/18 15:13:13"
67 67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69 69
70 70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 71 replaced with customized keywords and templates. Again, run
72 72 :hg:`kwdemo` to control the results of your configuration changes.
73 73
74 74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 75 to avoid storing expanded keywords in the change history.
76 76
77 77 To force expansion after enabling it, or a configuration change, run
78 78 :hg:`kwexpand`.
79 79
80 80 Expansions spanning more than one line and incremental expansions,
81 81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 82 {desc}" expands to the first line of the changeset description.
83 83 '''
84 84
85 85 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
86 86 from mercurial import localrepo, match, patch, templatefilters, templater, util
87 from mercurial import scmutil
87 from mercurial import scmutil, pathutil
88 88 from mercurial.hgweb import webcommands
89 89 from mercurial.i18n import _
90 90 import os, re, shutil, tempfile
91 91
92 92 commands.optionalrepo += ' kwdemo'
93 93 commands.inferrepo += ' kwexpand kwfiles kwshrink'
94 94
95 95 cmdtable = {}
96 96 command = cmdutil.command(cmdtable)
97 97 testedwith = 'internal'
98 98
99 99 # hg commands that do not act on keywords
100 100 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
101 101 ' outgoing push tip verify convert email glog')
102 102
103 103 # hg commands that trigger expansion only when writing to working dir,
104 104 # not when reading filelog, and unexpand when reading from working dir
105 105 restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'
106 106
107 107 # names of extensions using dorecord
108 108 recordextensions = 'record'
109 109
110 110 colortable = {
111 111 'kwfiles.enabled': 'green bold',
112 112 'kwfiles.deleted': 'cyan bold underline',
113 113 'kwfiles.enabledunknown': 'green',
114 114 'kwfiles.ignored': 'bold',
115 115 'kwfiles.ignoredunknown': 'none'
116 116 }
117 117
118 118 # date like in cvs' $Date
119 119 def utcdate(text):
120 120 ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
121 121 '''
122 122 return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
123 123 # date like in svn's $Date
124 124 def svnisodate(text):
125 125 ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
126 126 +0200 (Tue, 18 Aug 2009)".
127 127 '''
128 128 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
129 129 # date like in svn's $Id
130 130 def svnutcdate(text):
131 131 ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
132 132 11:00:13Z".
133 133 '''
134 134 return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
135 135
136 136 templatefilters.filters.update({'utcdate': utcdate,
137 137 'svnisodate': svnisodate,
138 138 'svnutcdate': svnutcdate})
139 139
140 140 # make keyword tools accessible
141 141 kwtools = {'templater': None, 'hgcmd': ''}
142 142
143 143 def _defaultkwmaps(ui):
144 144 '''Returns default keywordmaps according to keywordset configuration.'''
145 145 templates = {
146 146 'Revision': '{node|short}',
147 147 'Author': '{author|user}',
148 148 }
149 149 kwsets = ({
150 150 'Date': '{date|utcdate}',
151 151 'RCSfile': '{file|basename},v',
152 152 'RCSFile': '{file|basename},v', # kept for backwards compatibility
153 153 # with hg-keyword
154 154 'Source': '{root}/{file},v',
155 155 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
156 156 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
157 157 }, {
158 158 'Date': '{date|svnisodate}',
159 159 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
160 160 'LastChangedRevision': '{node|short}',
161 161 'LastChangedBy': '{author|user}',
162 162 'LastChangedDate': '{date|svnisodate}',
163 163 })
164 164 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
165 165 return templates
166 166
167 167 def _shrinktext(text, subfunc):
168 168 '''Helper for keyword expansion removal in text.
169 169 Depending on subfunc also returns number of substitutions.'''
170 170 return subfunc(r'$\1$', text)
171 171
172 172 def _preselect(wstatus, changed):
173 173 '''Retrieves modified and added files from a working directory state
174 174 and returns the subset of each contained in given changed files
175 175 retrieved from a change context.'''
176 176 modified, added = wstatus[:2]
177 177 modified = [f for f in modified if f in changed]
178 178 added = [f for f in added if f in changed]
179 179 return modified, added
180 180
181 181
182 182 class kwtemplater(object):
183 183 '''
184 184 Sets up keyword templates, corresponding keyword regex, and
185 185 provides keyword substitution functions.
186 186 '''
187 187
188 188 def __init__(self, ui, repo, inc, exc):
189 189 self.ui = ui
190 190 self.repo = repo
191 191 self.match = match.match(repo.root, '', [], inc, exc)
192 192 self.restrict = kwtools['hgcmd'] in restricted.split()
193 193 self.postcommit = False
194 194
195 195 kwmaps = self.ui.configitems('keywordmaps')
196 196 if kwmaps: # override default templates
197 197 self.templates = dict((k, templater.parsestring(v, False))
198 198 for k, v in kwmaps)
199 199 else:
200 200 self.templates = _defaultkwmaps(self.ui)
201 201
202 202 @util.propertycache
203 203 def escape(self):
204 204 '''Returns bar-separated and escaped keywords.'''
205 205 return '|'.join(map(re.escape, self.templates.keys()))
206 206
207 207 @util.propertycache
208 208 def rekw(self):
209 209 '''Returns regex for unexpanded keywords.'''
210 210 return re.compile(r'\$(%s)\$' % self.escape)
211 211
212 212 @util.propertycache
213 213 def rekwexp(self):
214 214 '''Returns regex for expanded keywords.'''
215 215 return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
216 216
217 217 def substitute(self, data, path, ctx, subfunc):
218 218 '''Replaces keywords in data with expanded template.'''
219 219 def kwsub(mobj):
220 220 kw = mobj.group(1)
221 221 ct = cmdutil.changeset_templater(self.ui, self.repo,
222 222 False, None, '', False)
223 223 ct.use_template(self.templates[kw])
224 224 self.ui.pushbuffer()
225 225 ct.show(ctx, root=self.repo.root, file=path)
226 226 ekw = templatefilters.firstline(self.ui.popbuffer())
227 227 return '$%s: %s $' % (kw, ekw)
228 228 return subfunc(kwsub, data)
229 229
230 230 def linkctx(self, path, fileid):
231 231 '''Similar to filelog.linkrev, but returns a changectx.'''
232 232 return self.repo.filectx(path, fileid=fileid).changectx()
233 233
234 234 def expand(self, path, node, data):
235 235 '''Returns data with keywords expanded.'''
236 236 if not self.restrict and self.match(path) and not util.binary(data):
237 237 ctx = self.linkctx(path, node)
238 238 return self.substitute(data, path, ctx, self.rekw.sub)
239 239 return data
240 240
241 241 def iskwfile(self, cand, ctx):
242 242 '''Returns subset of candidates which are configured for keyword
243 243 expansion but are not symbolic links.'''
244 244 return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
245 245
246 246 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
247 247 '''Overwrites selected files expanding/shrinking keywords.'''
248 248 if self.restrict or lookup or self.postcommit: # exclude kw_copy
249 249 candidates = self.iskwfile(candidates, ctx)
250 250 if not candidates:
251 251 return
252 252 kwcmd = self.restrict and lookup # kwexpand/kwshrink
253 253 if self.restrict or expand and lookup:
254 254 mf = ctx.manifest()
255 255 if self.restrict or rekw:
256 256 re_kw = self.rekw
257 257 else:
258 258 re_kw = self.rekwexp
259 259 if expand:
260 260 msg = _('overwriting %s expanding keywords\n')
261 261 else:
262 262 msg = _('overwriting %s shrinking keywords\n')
263 263 for f in candidates:
264 264 if self.restrict:
265 265 data = self.repo.file(f).read(mf[f])
266 266 else:
267 267 data = self.repo.wread(f)
268 268 if util.binary(data):
269 269 continue
270 270 if expand:
271 271 if lookup:
272 272 ctx = self.linkctx(f, mf[f])
273 273 data, found = self.substitute(data, f, ctx, re_kw.subn)
274 274 elif self.restrict:
275 275 found = re_kw.search(data)
276 276 else:
277 277 data, found = _shrinktext(data, re_kw.subn)
278 278 if found:
279 279 self.ui.note(msg % f)
280 280 fp = self.repo.wopener(f, "wb", atomictemp=True)
281 281 fp.write(data)
282 282 fp.close()
283 283 if kwcmd:
284 284 self.repo.dirstate.normal(f)
285 285 elif self.postcommit:
286 286 self.repo.dirstate.normallookup(f)
287 287
288 288 def shrink(self, fname, text):
289 289 '''Returns text with all keyword substitutions removed.'''
290 290 if self.match(fname) and not util.binary(text):
291 291 return _shrinktext(text, self.rekwexp.sub)
292 292 return text
293 293
294 294 def shrinklines(self, fname, lines):
295 295 '''Returns lines with keyword substitutions removed.'''
296 296 if self.match(fname):
297 297 text = ''.join(lines)
298 298 if not util.binary(text):
299 299 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
300 300 return lines
301 301
302 302 def wread(self, fname, data):
303 303 '''If in restricted mode returns data read from wdir with
304 304 keyword substitutions removed.'''
305 305 if self.restrict:
306 306 return self.shrink(fname, data)
307 307 return data
308 308
309 309 class kwfilelog(filelog.filelog):
310 310 '''
311 311 Subclass of filelog to hook into its read, add, cmp methods.
312 312 Keywords are "stored" unexpanded, and processed on reading.
313 313 '''
314 314 def __init__(self, opener, kwt, path):
315 315 super(kwfilelog, self).__init__(opener, path)
316 316 self.kwt = kwt
317 317 self.path = path
318 318
319 319 def read(self, node):
320 320 '''Expands keywords when reading filelog.'''
321 321 data = super(kwfilelog, self).read(node)
322 322 if self.renamed(node):
323 323 return data
324 324 return self.kwt.expand(self.path, node, data)
325 325
326 326 def add(self, text, meta, tr, link, p1=None, p2=None):
327 327 '''Removes keyword substitutions when adding to filelog.'''
328 328 text = self.kwt.shrink(self.path, text)
329 329 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
330 330
331 331 def cmp(self, node, text):
332 332 '''Removes keyword substitutions for comparison.'''
333 333 text = self.kwt.shrink(self.path, text)
334 334 return super(kwfilelog, self).cmp(node, text)
335 335
336 336 def _status(ui, repo, wctx, kwt, *pats, **opts):
337 337 '''Bails out if [keyword] configuration is not active.
338 338 Returns status of working directory.'''
339 339 if kwt:
340 340 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
341 341 unknown=opts.get('unknown') or opts.get('all'))
342 342 if ui.configitems('keyword'):
343 343 raise util.Abort(_('[keyword] patterns cannot match'))
344 344 raise util.Abort(_('no [keyword] patterns configured'))
345 345
346 346 def _kwfwrite(ui, repo, expand, *pats, **opts):
347 347 '''Selects files and passes them to kwtemplater.overwrite.'''
348 348 wctx = repo[None]
349 349 if len(wctx.parents()) > 1:
350 350 raise util.Abort(_('outstanding uncommitted merge'))
351 351 kwt = kwtools['templater']
352 352 wlock = repo.wlock()
353 353 try:
354 354 status = _status(ui, repo, wctx, kwt, *pats, **opts)
355 355 modified, added, removed, deleted, unknown, ignored, clean = status
356 356 if modified or added or removed or deleted:
357 357 raise util.Abort(_('outstanding uncommitted changes'))
358 358 kwt.overwrite(wctx, clean, True, expand)
359 359 finally:
360 360 wlock.release()
361 361
362 362 @command('kwdemo',
363 363 [('d', 'default', None, _('show default keyword template maps')),
364 364 ('f', 'rcfile', '',
365 365 _('read maps from rcfile'), _('FILE'))],
366 366 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
367 367 def demo(ui, repo, *args, **opts):
368 368 '''print [keywordmaps] configuration and an expansion example
369 369
370 370 Show current, custom, or default keyword template maps and their
371 371 expansions.
372 372
373 373 Extend the current configuration by specifying maps as arguments
374 374 and using -f/--rcfile to source an external hgrc file.
375 375
376 376 Use -d/--default to disable current configuration.
377 377
378 378 See :hg:`help templates` for information on templates and filters.
379 379 '''
380 380 def demoitems(section, items):
381 381 ui.write('[%s]\n' % section)
382 382 for k, v in sorted(items):
383 383 ui.write('%s = %s\n' % (k, v))
384 384
385 385 fn = 'demo.txt'
386 386 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
387 387 ui.note(_('creating temporary repository at %s\n') % tmpdir)
388 388 repo = localrepo.localrepository(repo.baseui, tmpdir, True)
389 389 ui.setconfig('keyword', fn, '')
390 390 svn = ui.configbool('keywordset', 'svn')
391 391 # explicitly set keywordset for demo output
392 392 ui.setconfig('keywordset', 'svn', svn)
393 393
394 394 uikwmaps = ui.configitems('keywordmaps')
395 395 if args or opts.get('rcfile'):
396 396 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
397 397 if uikwmaps:
398 398 ui.status(_('\textending current template maps\n'))
399 399 if opts.get('default') or not uikwmaps:
400 400 if svn:
401 401 ui.status(_('\toverriding default svn keywordset\n'))
402 402 else:
403 403 ui.status(_('\toverriding default cvs keywordset\n'))
404 404 if opts.get('rcfile'):
405 405 ui.readconfig(opts.get('rcfile'))
406 406 if args:
407 407 # simulate hgrc parsing
408 408 rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
409 409 fp = repo.opener('hgrc', 'w')
410 410 fp.writelines(rcmaps)
411 411 fp.close()
412 412 ui.readconfig(repo.join('hgrc'))
413 413 kwmaps = dict(ui.configitems('keywordmaps'))
414 414 elif opts.get('default'):
415 415 if svn:
416 416 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
417 417 else:
418 418 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
419 419 kwmaps = _defaultkwmaps(ui)
420 420 if uikwmaps:
421 421 ui.status(_('\tdisabling current template maps\n'))
422 422 for k, v in kwmaps.iteritems():
423 423 ui.setconfig('keywordmaps', k, v)
424 424 else:
425 425 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
426 426 if uikwmaps:
427 427 kwmaps = dict(uikwmaps)
428 428 else:
429 429 kwmaps = _defaultkwmaps(ui)
430 430
431 431 uisetup(ui)
432 432 reposetup(ui, repo)
433 433 ui.write('[extensions]\nkeyword =\n')
434 434 demoitems('keyword', ui.configitems('keyword'))
435 435 demoitems('keywordset', ui.configitems('keywordset'))
436 436 demoitems('keywordmaps', kwmaps.iteritems())
437 437 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
438 438 repo.wopener.write(fn, keywords)
439 439 repo[None].add([fn])
440 440 ui.note(_('\nkeywords written to %s:\n') % fn)
441 441 ui.note(keywords)
442 442 repo.dirstate.setbranch('demobranch')
443 443 for name, cmd in ui.configitems('hooks'):
444 444 if name.split('.', 1)[0].find('commit') > -1:
445 445 repo.ui.setconfig('hooks', name, '')
446 446 msg = _('hg keyword configuration and expansion example')
447 447 ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore
448 448 repo.commit(text=msg)
449 449 ui.status(_('\n\tkeywords expanded\n'))
450 450 ui.write(repo.wread(fn))
451 451 shutil.rmtree(tmpdir, ignore_errors=True)
452 452
453 453 @command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
454 454 def expand(ui, repo, *pats, **opts):
455 455 '''expand keywords in the working directory
456 456
457 457 Run after (re)enabling keyword expansion.
458 458
459 459 kwexpand refuses to run if given files contain local changes.
460 460 '''
461 461 # 3rd argument sets expansion to True
462 462 _kwfwrite(ui, repo, True, *pats, **opts)
463 463
464 464 @command('kwfiles',
465 465 [('A', 'all', None, _('show keyword status flags of all files')),
466 466 ('i', 'ignore', None, _('show files excluded from expansion')),
467 467 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
468 468 ] + commands.walkopts,
469 469 _('hg kwfiles [OPTION]... [FILE]...'))
470 470 def files(ui, repo, *pats, **opts):
471 471 '''show files configured for keyword expansion
472 472
473 473 List which files in the working directory are matched by the
474 474 [keyword] configuration patterns.
475 475
476 476 Useful to prevent inadvertent keyword expansion and to speed up
477 477 execution by including only files that are actual candidates for
478 478 expansion.
479 479
480 480 See :hg:`help keyword` on how to construct patterns both for
481 481 inclusion and exclusion of files.
482 482
483 483 With -A/--all and -v/--verbose the codes used to show the status
484 484 of files are::
485 485
486 486 K = keyword expansion candidate
487 487 k = keyword expansion candidate (not tracked)
488 488 I = ignored
489 489 i = ignored (not tracked)
490 490 '''
491 491 kwt = kwtools['templater']
492 492 wctx = repo[None]
493 493 status = _status(ui, repo, wctx, kwt, *pats, **opts)
494 494 cwd = pats and repo.getcwd() or ''
495 495 modified, added, removed, deleted, unknown, ignored, clean = status
496 496 files = []
497 497 if not opts.get('unknown') or opts.get('all'):
498 498 files = sorted(modified + added + clean)
499 499 kwfiles = kwt.iskwfile(files, wctx)
500 500 kwdeleted = kwt.iskwfile(deleted, wctx)
501 501 kwunknown = kwt.iskwfile(unknown, wctx)
502 502 if not opts.get('ignore') or opts.get('all'):
503 503 showfiles = kwfiles, kwdeleted, kwunknown
504 504 else:
505 505 showfiles = [], [], []
506 506 if opts.get('all') or opts.get('ignore'):
507 507 showfiles += ([f for f in files if f not in kwfiles],
508 508 [f for f in unknown if f not in kwunknown])
509 509 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
510 510 kwstates = zip(kwlabels, 'K!kIi', showfiles)
511 511 fm = ui.formatter('kwfiles', opts)
512 512 fmt = '%.0s%s\n'
513 513 if opts.get('all') or ui.verbose:
514 514 fmt = '%s %s\n'
515 515 for kwstate, char, filenames in kwstates:
516 516 label = 'kwfiles.' + kwstate
517 517 for f in filenames:
518 518 fm.startitem()
519 519 fm.write('kwstatus path', fmt, char,
520 520 repo.pathto(f, cwd), label=label)
521 521 fm.end()
522 522
523 523 @command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
524 524 def shrink(ui, repo, *pats, **opts):
525 525 '''revert expanded keywords in the working directory
526 526
527 527 Must be run before changing/disabling active keywords.
528 528
529 529 kwshrink refuses to run if given files contain local changes.
530 530 '''
531 531 # 3rd argument sets expansion to False
532 532 _kwfwrite(ui, repo, False, *pats, **opts)
533 533
534 534
535 535 def uisetup(ui):
536 536 ''' Monkeypatches dispatch._parse to retrieve user command.'''
537 537
538 538 def kwdispatch_parse(orig, ui, args):
539 539 '''Monkeypatch dispatch._parse to obtain running hg command.'''
540 540 cmd, func, args, options, cmdoptions = orig(ui, args)
541 541 kwtools['hgcmd'] = cmd
542 542 return cmd, func, args, options, cmdoptions
543 543
544 544 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
545 545
546 546 def reposetup(ui, repo):
547 547 '''Sets up repo as kwrepo for keyword substitution.
548 548 Overrides file method to return kwfilelog instead of filelog
549 549 if file matches user configuration.
550 550 Wraps commit to overwrite configured files with updated
551 551 keyword substitutions.
552 552 Monkeypatches patch and webcommands.'''
553 553
554 554 try:
555 555 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
556 556 or '.hg' in util.splitpath(repo.root)
557 557 or repo._url.startswith('bundle:')):
558 558 return
559 559 except AttributeError:
560 560 pass
561 561
562 562 inc, exc = [], ['.hg*']
563 563 for pat, opt in ui.configitems('keyword'):
564 564 if opt != 'ignore':
565 565 inc.append(pat)
566 566 else:
567 567 exc.append(pat)
568 568 if not inc:
569 569 return
570 570
571 571 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
572 572
573 573 class kwrepo(repo.__class__):
574 574 def file(self, f):
575 575 if f[0] == '/':
576 576 f = f[1:]
577 577 return kwfilelog(self.sopener, kwt, f)
578 578
579 579 def wread(self, filename):
580 580 data = super(kwrepo, self).wread(filename)
581 581 return kwt.wread(filename, data)
582 582
583 583 def commit(self, *args, **opts):
584 584 # use custom commitctx for user commands
585 585 # other extensions can still wrap repo.commitctx directly
586 586 self.commitctx = self.kwcommitctx
587 587 try:
588 588 return super(kwrepo, self).commit(*args, **opts)
589 589 finally:
590 590 del self.commitctx
591 591
592 592 def kwcommitctx(self, ctx, error=False):
593 593 n = super(kwrepo, self).commitctx(ctx, error)
594 594 # no lock needed, only called from repo.commit() which already locks
595 595 if not kwt.postcommit:
596 596 restrict = kwt.restrict
597 597 kwt.restrict = True
598 598 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
599 599 False, True)
600 600 kwt.restrict = restrict
601 601 return n
602 602
603 603 def rollback(self, dryrun=False, force=False):
604 604 wlock = self.wlock()
605 605 try:
606 606 if not dryrun:
607 607 changed = self['.'].files()
608 608 ret = super(kwrepo, self).rollback(dryrun, force)
609 609 if not dryrun:
610 610 ctx = self['.']
611 611 modified, added = _preselect(self[None].status(), changed)
612 612 kwt.overwrite(ctx, modified, True, True)
613 613 kwt.overwrite(ctx, added, True, False)
614 614 return ret
615 615 finally:
616 616 wlock.release()
617 617
618 618 # monkeypatches
619 619 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
620 620 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
621 621 rejects or conflicts due to expanded keywords in working dir.'''
622 622 orig(self, ui, gp, backend, store, eolmode)
623 623 # shrink keywords read from working dir
624 624 self.lines = kwt.shrinklines(self.fname, self.lines)
625 625
626 626 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
627 627 opts=None, prefix=''):
628 628 '''Monkeypatch patch.diff to avoid expansion.'''
629 629 kwt.restrict = True
630 630 return orig(repo, node1, node2, match, changes, opts, prefix)
631 631
632 632 def kwweb_skip(orig, web, req, tmpl):
633 633 '''Wraps webcommands.x turning off keyword expansion.'''
634 634 kwt.match = util.never
635 635 return orig(web, req, tmpl)
636 636
637 637 def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
638 638 '''Wraps cmdutil.amend expanding keywords after amend.'''
639 639 wlock = repo.wlock()
640 640 try:
641 641 kwt.postcommit = True
642 642 newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
643 643 if newid != old.node():
644 644 ctx = repo[newid]
645 645 kwt.restrict = True
646 646 kwt.overwrite(ctx, ctx.files(), False, True)
647 647 kwt.restrict = False
648 648 return newid
649 649 finally:
650 650 wlock.release()
651 651
652 652 def kw_copy(orig, ui, repo, pats, opts, rename=False):
653 653 '''Wraps cmdutil.copy so that copy/rename destinations do not
654 654 contain expanded keywords.
655 655 Note that the source of a regular file destination may also be a
656 656 symlink:
657 657 hg cp sym x -> x is symlink
658 658 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
659 659 For the latter we have to follow the symlink to find out whether its
660 660 target is configured for expansion and we therefore must unexpand the
661 661 keywords in the destination.'''
662 662 wlock = repo.wlock()
663 663 try:
664 664 orig(ui, repo, pats, opts, rename)
665 665 if opts.get('dry_run'):
666 666 return
667 667 wctx = repo[None]
668 668 cwd = repo.getcwd()
669 669
670 670 def haskwsource(dest):
671 671 '''Returns true if dest is a regular file and configured for
672 672 expansion or a symlink which points to a file configured for
673 673 expansion. '''
674 674 source = repo.dirstate.copied(dest)
675 675 if 'l' in wctx.flags(source):
676 source = scmutil.canonpath(repo.root, cwd,
676 source = pathutil.canonpath(repo.root, cwd,
677 677 os.path.realpath(source))
678 678 return kwt.match(source)
679 679
680 680 candidates = [f for f in repo.dirstate.copies() if
681 681 'l' not in wctx.flags(f) and haskwsource(f)]
682 682 kwt.overwrite(wctx, candidates, False, False)
683 683 finally:
684 684 wlock.release()
685 685
686 686 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
687 687 '''Wraps record.dorecord expanding keywords after recording.'''
688 688 wlock = repo.wlock()
689 689 try:
690 690 # record returns 0 even when nothing has changed
691 691 # therefore compare nodes before and after
692 692 kwt.postcommit = True
693 693 ctx = repo['.']
694 694 wstatus = repo[None].status()
695 695 ret = orig(ui, repo, commitfunc, *pats, **opts)
696 696 recctx = repo['.']
697 697 if ctx != recctx:
698 698 modified, added = _preselect(wstatus, recctx.files())
699 699 kwt.restrict = False
700 700 kwt.overwrite(recctx, modified, False, True)
701 701 kwt.overwrite(recctx, added, False, True, True)
702 702 kwt.restrict = True
703 703 return ret
704 704 finally:
705 705 wlock.release()
706 706
707 707 def kwfilectx_cmp(orig, self, fctx):
708 708 # keyword affects data size, comparing wdir and filelog size does
709 709 # not make sense
710 710 if (fctx._filerev is None and
711 711 (self._repo._encodefilterpats or
712 712 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
713 713 self.size() - 4 == fctx.size()) or
714 714 self.size() == fctx.size()):
715 715 return self._filelog.cmp(self._filenode, fctx.data())
716 716 return True
717 717
718 718 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
719 719 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
720 720 extensions.wrapfunction(patch, 'diff', kw_diff)
721 721 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
722 722 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
723 723 for c in 'annotate changeset rev filediff diff'.split():
724 724 extensions.wrapfunction(webcommands, c, kwweb_skip)
725 725 for name in recordextensions.split():
726 726 try:
727 727 record = extensions.find(name)
728 728 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
729 729 except KeyError:
730 730 pass
731 731
732 732 repo.__class__ = kwrepo
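
All of the kw_* wrappers above use Mercurial's standard extensions.wrapfunction mechanism. A stripped-down sketch of that pattern (hypothetical wrapper name, shown only to clarify the shape; the wrapped original is always passed in as the first argument):

    from mercurial import extensions, patch

    def kw_diff_sketch(orig, repo, *args, **kwargs):
        # adjust any extension state here, then delegate to the
        # original patch.diff, which arrives as 'orig'
        return orig(repo, *args, **kwargs)

    # installs the wrapper; later calls to patch.diff go through it
    extensions.wrapfunction(patch, 'diff', kw_diff_sketch)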
@@ -1,1218 +1,1218 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge, discovery
15 node, archival, error, merge, discovery, pathutil
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22 import basestore
23 23
24 24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25 25
26 26 def installnormalfilesmatchfn(manifest):
27 27 '''overrides scmutil.match so that the matcher it returns will ignore all
28 28 largefiles'''
29 29 oldmatch = None # for the closure
30 30 def overridematch(ctx, pats=[], opts={}, globbed=False,
31 31 default='relpath'):
32 32 match = oldmatch(ctx, pats, opts, globbed, default)
33 33 m = copy.copy(match)
34 34 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
35 35 manifest)
36 36 m._files = filter(notlfile, m._files)
37 37 m._fmap = set(m._files)
38 38 m._always = False
39 39 origmatchfn = m.matchfn
40 40 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
41 41 return m
42 42 oldmatch = installmatchfn(overridematch)
43 43
44 44 def installmatchfn(f):
45 45 oldmatch = scmutil.match
46 46 setattr(f, 'oldmatch', oldmatch)
47 47 scmutil.match = f
48 48 return oldmatch
49 49
50 50 def restorematchfn():
51 51 '''restores scmutil.match to what it was before installnormalfilesmatchfn
52 52 was called. no-op if scmutil.match is its original function.
53 53
54 54 Note that n calls to installnormalfilesmatchfn will require n calls to
55 55 restorematchfn to reverse.'''
56 56 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57 57
58 58 def addlargefiles(ui, repo, *pats, **opts):
59 59 large = opts.pop('large', None)
60 60 lfsize = lfutil.getminsize(
61 61 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
62 62
63 63 lfmatcher = None
64 64 if lfutil.islfilesrepo(repo):
65 65 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
66 66 if lfpats:
67 67 lfmatcher = match_.match(repo.root, '', list(lfpats))
68 68
69 69 lfnames = []
70 70 m = scmutil.match(repo[None], pats, opts)
71 71 m.bad = lambda x, y: None
72 72 wctx = repo[None]
73 73 for f in repo.walk(m):
74 74 exact = m.exact(f)
75 75 lfile = lfutil.standin(f) in wctx
76 76 nfile = f in wctx
77 77 exists = lfile or nfile
78 78
79 79 # Don't warn the user when they attempt to add a normal tracked file.
80 80 # The normal add code will do that for us.
81 81 if exact and exists:
82 82 if lfile:
83 83 ui.warn(_('%s already a largefile\n') % f)
84 84 continue
85 85
86 86 if (exact or not exists) and not lfutil.isstandin(f):
87 87 wfile = repo.wjoin(f)
88 88
89 89 # In case the file was removed previously, but not committed
90 90 # (issue3507)
91 91 if not os.path.exists(wfile):
92 92 continue
93 93
94 94 abovemin = (lfsize and
95 95 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
96 96 if large or abovemin or (lfmatcher and lfmatcher(f)):
97 97 lfnames.append(f)
98 98 if ui.verbose or not exact:
99 99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100 100
101 101 bad = []
102 102 standins = []
103 103
104 104 # Need to lock, otherwise there could be a race condition between
105 105 # when standins are created and added to the repo.
106 106 wlock = repo.wlock()
107 107 try:
108 108 if not opts.get('dry_run'):
109 109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 110 for f in lfnames:
111 111 standinname = lfutil.standin(f)
112 112 lfutil.writestandin(repo, standinname, hash='',
113 113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 114 standins.append(standinname)
115 115 if lfdirstate[f] == 'r':
116 116 lfdirstate.normallookup(f)
117 117 else:
118 118 lfdirstate.add(f)
119 119 lfdirstate.write()
120 120 bad += [lfutil.splitstandin(f)
121 121 for f in repo[None].add(standins)
122 122 if f in m.files()]
123 123 finally:
124 124 wlock.release()
125 125 return bad
126 126
127 127 def removelargefiles(ui, repo, *pats, **opts):
128 128 after = opts.get('after')
129 129 if not pats and not after:
130 130 raise util.Abort(_('no files specified'))
131 131 m = scmutil.match(repo[None], pats, opts)
132 132 try:
133 133 repo.lfstatus = True
134 134 s = repo.status(match=m, clean=True)
135 135 finally:
136 136 repo.lfstatus = False
137 137 manifest = repo[None].manifest()
138 138 modified, added, deleted, clean = [[f for f in list
139 139 if lfutil.standin(f) in manifest]
140 140 for list in [s[0], s[1], s[3], s[6]]]
141 141
142 142 def warn(files, msg):
143 143 for f in files:
144 144 ui.warn(msg % m.rel(f))
145 145 return int(len(files) > 0)
146 146
147 147 result = 0
148 148
149 149 if after:
150 150 remove, forget = deleted, []
151 151 result = warn(modified + added + clean,
152 152 _('not removing %s: file still exists\n'))
153 153 else:
154 154 remove, forget = deleted + clean, []
155 155 result = warn(modified, _('not removing %s: file is modified (use -f'
156 156 ' to force removal)\n'))
157 157 result = warn(added, _('not removing %s: file has been marked for add'
158 158 ' (use forget to undo)\n')) or result
159 159
160 160 for f in sorted(remove + forget):
161 161 if ui.verbose or not m.exact(f):
162 162 ui.status(_('removing %s\n') % m.rel(f))
163 163
164 164 # Need to lock because standin files are deleted then removed from the
165 165 # repository and we could race in-between.
166 166 wlock = repo.wlock()
167 167 try:
168 168 lfdirstate = lfutil.openlfdirstate(ui, repo)
169 169 for f in remove:
170 170 if not after:
171 171 # If this is being called by addremove, notify the user that we
172 172 # are removing the file.
173 173 if getattr(repo, "_isaddremove", False):
174 174 ui.status(_('removing %s\n') % f)
175 175 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
176 176 lfdirstate.remove(f)
177 177 lfdirstate.write()
178 178 forget = [lfutil.standin(f) for f in forget]
179 179 remove = [lfutil.standin(f) for f in remove]
180 180 repo[None].forget(forget)
181 181 # If this is being called by addremove, let the original addremove
182 182 # function handle this.
183 183 if not getattr(repo, "_isaddremove", False):
184 184 for f in remove:
185 185 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
186 186 repo[None].forget(remove)
187 187 finally:
188 188 wlock.release()
189 189
190 190 return result
191 191
192 192 # For overriding mercurial.hgweb.webcommands so that largefiles will
193 193 # appear at their right place in the manifests.
194 194 def decodepath(orig, path):
195 195 return lfutil.splitstandin(path) or path
196 196
197 197 # -- Wrappers: modify existing commands --------------------------------
198 198
199 199 # Add works by going through the files that the user wanted to add and
200 200 # checking if they should be added as largefiles. Then it makes a new
201 201 # matcher which matches only the normal files and runs the original
202 202 # version of add.
203 203 def overrideadd(orig, ui, repo, *pats, **opts):
204 204 normal = opts.pop('normal')
205 205 if normal:
206 206 if opts.get('large'):
207 207 raise util.Abort(_('--normal cannot be used with --large'))
208 208 return orig(ui, repo, *pats, **opts)
209 209 bad = addlargefiles(ui, repo, *pats, **opts)
210 210 installnormalfilesmatchfn(repo[None].manifest())
211 211 result = orig(ui, repo, *pats, **opts)
212 212 restorematchfn()
213 213
214 214 return (result == 1 or bad) and 1 or 0
215 215
216 216 def overrideremove(orig, ui, repo, *pats, **opts):
217 217 installnormalfilesmatchfn(repo[None].manifest())
218 218 result = orig(ui, repo, *pats, **opts)
219 219 restorematchfn()
220 220 return removelargefiles(ui, repo, *pats, **opts) or result
221 221
222 222 def overridestatusfn(orig, repo, rev2, **opts):
223 223 try:
224 224 repo._repo.lfstatus = True
225 225 return orig(repo, rev2, **opts)
226 226 finally:
227 227 repo._repo.lfstatus = False
228 228
229 229 def overridestatus(orig, ui, repo, *pats, **opts):
230 230 try:
231 231 repo.lfstatus = True
232 232 return orig(ui, repo, *pats, **opts)
233 233 finally:
234 234 repo.lfstatus = False
235 235
236 236 def overridedirty(orig, repo, ignoreupdate=False):
237 237 try:
238 238 repo._repo.lfstatus = True
239 239 return orig(repo, ignoreupdate)
240 240 finally:
241 241 repo._repo.lfstatus = False
242 242
243 243 def overridelog(orig, ui, repo, *pats, **opts):
244 244 def overridematch(ctx, pats=[], opts={}, globbed=False,
245 245 default='relpath'):
246 246 """Matcher that merges root directory with .hglf, suitable for log.
247 247 It is still possible to match .hglf directly.
248 248 For any listed files run log on the standin too.
249 249 matchfn tries both the given filename and with .hglf stripped.
250 250 """
251 251 match = oldmatch(ctx, pats, opts, globbed, default)
252 252 m = copy.copy(match)
253 253 for i in range(0, len(m._files)):
254 254 standin = lfutil.standin(m._files[i])
255 255 if standin in repo[ctx.node()]:
256 256 m._files[i] = standin
257 257 m._fmap = set(m._files)
258 258 m._always = False
259 259 origmatchfn = m.matchfn
260 260 def lfmatchfn(f):
261 261 lf = lfutil.splitstandin(f)
262 262 if lf is not None and origmatchfn(lf):
263 263 return True
264 264 r = origmatchfn(f)
265 265 return r
266 266 m.matchfn = lfmatchfn
267 267 return m
268 268 oldmatch = installmatchfn(overridematch)
269 269 try:
270 270 repo.lfstatus = True
271 271 return orig(ui, repo, *pats, **opts)
272 272 finally:
273 273 repo.lfstatus = False
274 274 restorematchfn()
275 275
276 276 def overrideverify(orig, ui, repo, *pats, **opts):
277 277 large = opts.pop('large', False)
278 278 all = opts.pop('lfa', False)
279 279 contents = opts.pop('lfc', False)
280 280
281 281 result = orig(ui, repo, *pats, **opts)
282 282 if large or all or contents:
283 283 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
284 284 return result
285 285
286 286 def overridedebugstate(orig, ui, repo, *pats, **opts):
287 287 large = opts.pop('large', False)
288 288 if large:
289 289 lfcommands.debugdirstate(ui, repo)
290 290 else:
291 291 orig(ui, repo, *pats, **opts)
292 292
293 293 # Override needs to refresh standins so that update's normal merge
294 294 # will go through properly. Then the other update hook (overriding repo.update)
295 295 # will get the new files. Filemerge is also overridden so that the merge
296 296 # will merge standins correctly.
297 297 def overrideupdate(orig, ui, repo, *pats, **opts):
298 298 lfdirstate = lfutil.openlfdirstate(ui, repo)
299 299 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
300 300 False, False)
301 301 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
302 302
303 303 # Need to lock between the standins getting updated and their
304 304 # largefiles getting updated
305 305 wlock = repo.wlock()
306 306 try:
307 307 if opts['check']:
308 308 mod = len(modified) > 0
309 309 for lfile in unsure:
310 310 standin = lfutil.standin(lfile)
311 311 if repo['.'][standin].data().strip() != \
312 312 lfutil.hashfile(repo.wjoin(lfile)):
313 313 mod = True
314 314 else:
315 315 lfdirstate.normal(lfile)
316 316 lfdirstate.write()
317 317 if mod:
318 318 raise util.Abort(_('uncommitted changes'))
319 319 # XXX handle removed differently
320 320 if not opts['clean']:
321 321 for lfile in unsure + modified + added:
322 322 lfutil.updatestandin(repo, lfutil.standin(lfile))
323 323 finally:
324 324 wlock.release()
325 325 return orig(ui, repo, *pats, **opts)
326 326
327 327 # Before starting the manifest merge, merge.updates will call
328 328 # _checkunknown to check if there are any files in the merged-in
329 329 # changeset that collide with unknown files in the working copy.
330 330 #
331 331 # The largefiles are seen as unknown, so this prevents us from merging
332 332 # in a file 'foo' if we already have a largefile with the same name.
333 333 #
334 334 # The overridden function filters the unknown files by removing any
335 335 # largefiles. This makes the merge proceed and we can then handle this
336 336 # case further in the overridden manifestmerge function below.
337 337 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
338 338 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
339 339 return False
340 340 return origfn(repo, wctx, mctx, f)
341 341
342 342 # The manifest merge handles conflicts on the manifest level. We want
343 343 # to handle changes in largefile-ness of files at this level too.
344 344 #
345 345 # The strategy is to run the original manifestmerge and then process
346 346 # the action list it outputs. There are two cases we need to deal with:
347 347 #
348 348 # 1. Normal file in p1, largefile in p2. Here the largefile is
349 349 # detected via its standin file, which will enter the working copy
350 350 # with a "get" action. It is not "merge" since the standin is all
351 351 # Mercurial is concerned with at this level -- the link to the
352 352 # existing normal file is not relevant here.
353 353 #
354 354 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
355 355 # since the largefile will be present in the working copy and
356 356 # different from the normal file in p2. Mercurial therefore
357 357 # triggers a merge action.
358 358 #
359 359 # In both cases, we prompt the user and emit new actions to either
360 360 # remove the standin (if the normal file was kept) or to remove the
361 361 # normal file and get the standin (if the largefile was kept). The
362 362 # default prompt answer is to use the largefile version since it was
363 363 # presumably changed on purpose.
364 364 #
365 365 # Finally, the merge.applyupdates function will then take care of
366 366 # writing the files into the working copy and lfcommands.updatelfiles
367 367 # will update the largefiles.
368 368 def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
369 369 partial, acceptremote=False):
370 370 overwrite = force and not branchmerge
371 371 actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
372 372 acceptremote)
373 373
374 374 if overwrite:
375 375 return actions
376 376
377 377 removes = set(a[0] for a in actions if a[1] == 'r')
378 378 processed = []
379 379
380 380 for action in actions:
381 381 f, m, args, msg = action
382 382
383 383 splitstandin = lfutil.splitstandin(f)
384 384 if (m == "g" and splitstandin is not None and
385 385 splitstandin in p1 and splitstandin not in removes):
386 386 # Case 1: normal file in the working copy, largefile in
387 387 # the second parent
388 388 lfile = splitstandin
389 389 standin = f
390 390 msg = _('remote turned local normal file %s into a largefile\n'
391 391 'use (l)argefile or keep (n)ormal file?'
392 392 '$$ &Largefile $$ &Normal file') % lfile
393 393 if repo.ui.promptchoice(msg, 0) == 0:
394 394 processed.append((lfile, "r", None, msg))
395 395 processed.append((standin, "g", (p2.flags(standin),), msg))
396 396 else:
397 397 processed.append((standin, "r", None, msg))
398 398 elif (m == "g" and
399 399 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
400 400 # Case 2: largefile in the working copy, normal file in
401 401 # the second parent
402 402 standin = lfutil.standin(f)
403 403 lfile = f
404 404 msg = _('remote turned local largefile %s into a normal file\n'
405 405 'keep (l)argefile or use (n)ormal file?'
406 406 '$$ &Largefile $$ &Normal file') % lfile
407 407 if repo.ui.promptchoice(msg, 0) == 0:
408 408 processed.append((lfile, "r", None, msg))
409 409 else:
410 410 processed.append((standin, "r", None, msg))
411 411 processed.append((lfile, "g", (p2.flags(lfile),), msg))
412 412 else:
413 413 processed.append(action)
414 414
415 415 return processed
416 416
417 417 # Override filemerge to prompt the user about how they wish to merge
418 418 # largefiles. This will handle identical edits, and copy/rename +
419 419 # edit without prompting the user.
420 420 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
421 421 # Use better variable names here. Because this is a wrapper we cannot
422 422 # change the variable names in the function declaration.
423 423 fcdest, fcother, fcancestor = fcd, fco, fca
424 424 if not lfutil.isstandin(orig):
425 425 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
426 426 else:
427 427 if not fcother.cmp(fcdest): # files identical?
428 428 return None
429 429
430 430 # backwards, use working dir parent as ancestor
431 431 if fcancestor == fcother:
432 432 fcancestor = fcdest.parents()[0]
433 433
434 434 if orig != fcother.path():
435 435 repo.ui.status(_('merging %s and %s to %s\n')
436 436 % (lfutil.splitstandin(orig),
437 437 lfutil.splitstandin(fcother.path()),
438 438 lfutil.splitstandin(fcdest.path())))
439 439 else:
440 440 repo.ui.status(_('merging %s\n')
441 441 % lfutil.splitstandin(fcdest.path()))
442 442
443 443 if fcancestor.path() != fcother.path() and fcother.data() == \
444 444 fcancestor.data():
445 445 return 0
446 446 if fcancestor.path() != fcdest.path() and fcdest.data() == \
447 447 fcancestor.data():
448 448 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
449 449 return 0
450 450
451 451 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
452 452 'keep (l)ocal or take (o)ther?'
453 453 '$$ &Local $$ &Other') %
454 454 lfutil.splitstandin(orig), 0) == 0:
455 455 return 0
456 456 else:
457 457 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
458 458 return 0
459 459
460 460 # Copy first changes the matchers to match standins instead of
461 461 # largefiles. Then it overrides util.copyfile in that function it
462 462 # checks if the destination largefile already exists. It also keeps a
463 463 # list of copied files so that the largefiles can be copied and the
464 464 # dirstate updated.
465 465 def overridecopy(orig, ui, repo, pats, opts, rename=False):
466 466 # doesn't remove largefile on rename
467 467 if len(pats) < 2:
468 468 # this isn't legal, let the original function deal with it
469 469 return orig(ui, repo, pats, opts, rename)
470 470
471 471 def makestandin(relpath):
472 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
472 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
473 473 return os.path.join(repo.wjoin(lfutil.standin(path)))
474 474
475 475 fullpats = scmutil.expandpats(pats)
476 476 dest = fullpats[-1]
477 477
478 478 if os.path.isdir(dest):
479 479 if not os.path.isdir(makestandin(dest)):
480 480 os.makedirs(makestandin(dest))
481 481 # This could copy both lfiles and normal files in one command,
482 482 # but we don't want to do that. First replace their matcher to
483 483 # only match normal files and run it, then replace it to just
484 484 # match largefiles and run it again.
485 485 nonormalfiles = False
486 486 nolfiles = False
487 487 try:
488 488 try:
489 489 installnormalfilesmatchfn(repo[None].manifest())
490 490 result = orig(ui, repo, pats, opts, rename)
491 491 except util.Abort, e:
492 492 if str(e) != _('no files to copy'):
493 493 raise e
494 494 else:
495 495 nonormalfiles = True
496 496 result = 0
497 497 finally:
498 498 restorematchfn()
499 499
500 500 # The first rename can cause our current working directory to be removed.
501 501 # In that case there is nothing left to copy/rename so just quit.
502 502 try:
503 503 repo.getcwd()
504 504 except OSError:
505 505 return result
506 506
507 507 try:
508 508 try:
509 509 # When we call orig below it creates the standins but we don't add
510 510 # them to the dir state until later so lock during that time.
511 511 wlock = repo.wlock()
512 512
513 513 manifest = repo[None].manifest()
514 514 oldmatch = None # for the closure
515 515 def overridematch(ctx, pats=[], opts={}, globbed=False,
516 516 default='relpath'):
517 517 newpats = []
518 518 # The patterns were previously mangled to add the standin
519 519 # directory; we need to remove that now
520 520 for pat in pats:
521 521 if match_.patkind(pat) is None and lfutil.shortname in pat:
522 522 newpats.append(pat.replace(lfutil.shortname, ''))
523 523 else:
524 524 newpats.append(pat)
525 525 match = oldmatch(ctx, newpats, opts, globbed, default)
526 526 m = copy.copy(match)
527 527 lfile = lambda f: lfutil.standin(f) in manifest
528 528 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
529 529 m._fmap = set(m._files)
530 530 m._always = False
531 531 origmatchfn = m.matchfn
532 532 m.matchfn = lambda f: (lfutil.isstandin(f) and
533 533 (f in manifest) and
534 534 origmatchfn(lfutil.splitstandin(f)) or
535 535 None)
536 536 return m
537 537 oldmatch = installmatchfn(overridematch)
538 538 listpats = []
539 539 for pat in pats:
540 540 if match_.patkind(pat) is not None:
541 541 listpats.append(pat)
542 542 else:
543 543 listpats.append(makestandin(pat))
544 544
545 545 try:
546 546 origcopyfile = util.copyfile
547 547 copiedfiles = []
548 548 def overridecopyfile(src, dest):
549 549 if (lfutil.shortname in src and
550 550 dest.startswith(repo.wjoin(lfutil.shortname))):
551 551 destlfile = dest.replace(lfutil.shortname, '')
552 552 if not opts['force'] and os.path.exists(destlfile):
553 553 raise IOError('',
554 554 _('destination largefile already exists'))
555 555 copiedfiles.append((src, dest))
556 556 origcopyfile(src, dest)
557 557
558 558 util.copyfile = overridecopyfile
559 559 result += orig(ui, repo, listpats, opts, rename)
560 560 finally:
561 561 util.copyfile = origcopyfile
562 562
563 563 lfdirstate = lfutil.openlfdirstate(ui, repo)
564 564 for (src, dest) in copiedfiles:
565 565 if (lfutil.shortname in src and
566 566 dest.startswith(repo.wjoin(lfutil.shortname))):
567 567 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
568 568 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
569 569 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
570 570 if not os.path.isdir(destlfiledir):
571 571 os.makedirs(destlfiledir)
572 572 if rename:
573 573 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
574 574 lfdirstate.remove(srclfile)
575 575 else:
576 576 util.copyfile(repo.wjoin(srclfile),
577 577 repo.wjoin(destlfile))
578 578
579 579 lfdirstate.add(destlfile)
580 580 lfdirstate.write()
581 581 except util.Abort, e:
582 582 if str(e) != _('no files to copy'):
583 583 raise e
584 584 else:
585 585 nolfiles = True
586 586 finally:
587 587 restorematchfn()
588 588 wlock.release()
589 589
590 590 if nolfiles and nonormalfiles:
591 591 raise util.Abort(_('no files to copy'))
592 592
593 593 return result
594 594
595 595 # When the user calls revert, we have to be careful to not revert any
596 596 # changes to other largefiles accidentally. This means we have to keep
597 597 # track of the largefiles that are being reverted so we only pull down
598 598 # the necessary largefiles.
599 599 #
600 600 # Standins are only updated (to match the hash of largefiles) before
601 601 # commits. Update the standins, then run the original revert, changing
602 602 # the matcher to hit standins instead of largefiles. Based on the
603 603 # resulting standins, update the largefiles. Then return the standins
604 604 # to their proper state.
605 605 def overriderevert(orig, ui, repo, *pats, **opts):
606 606 # Because we put the standins in a bad state (by updating them)
607 607 # and then return them to a correct state we need to lock to
608 608 # prevent others from changing them in their incorrect state.
609 609 wlock = repo.wlock()
610 610 try:
611 611 lfdirstate = lfutil.openlfdirstate(ui, repo)
612 612 (modified, added, removed, missing, unknown, ignored, clean) = \
613 613 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
614 614 lfdirstate.write()
615 615 for lfile in modified:
616 616 lfutil.updatestandin(repo, lfutil.standin(lfile))
617 617 for lfile in missing:
618 618 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
619 619 os.unlink(repo.wjoin(lfutil.standin(lfile)))
620 620
621 621 try:
622 622 ctx = scmutil.revsingle(repo, opts.get('rev'))
623 623 oldmatch = None # for the closure
624 624 def overridematch(ctx, pats=[], opts={}, globbed=False,
625 625 default='relpath'):
626 626 match = oldmatch(ctx, pats, opts, globbed, default)
627 627 m = copy.copy(match)
628 628 def tostandin(f):
629 629 if lfutil.standin(f) in ctx:
630 630 return lfutil.standin(f)
631 631 elif lfutil.standin(f) in repo[None]:
632 632 return None
633 633 return f
634 634 m._files = [tostandin(f) for f in m._files]
635 635 m._files = [f for f in m._files if f is not None]
636 636 m._fmap = set(m._files)
637 637 m._always = False
638 638 origmatchfn = m.matchfn
639 639 def matchfn(f):
640 640 if lfutil.isstandin(f):
641 641 # We need to keep track of what largefiles are being
642 642 # matched so we know which ones to update later --
643 643 # otherwise we accidentally revert changes to other
644 644 # largefiles. This is repo-specific, so duckpunch the
645 645 # repo object to keep the list of largefiles for us
646 646 # later.
647 647 if origmatchfn(lfutil.splitstandin(f)) and \
648 648 (f in repo[None] or f in ctx):
649 649 lfileslist = getattr(repo, '_lfilestoupdate', [])
650 650 lfileslist.append(lfutil.splitstandin(f))
651 651 repo._lfilestoupdate = lfileslist
652 652 return True
653 653 else:
654 654 return False
655 655 return origmatchfn(f)
656 656 m.matchfn = matchfn
657 657 return m
658 658 oldmatch = installmatchfn(overridematch)
659 659 scmutil.match # no-op attribute access; this bare statement appears to be a leftover
660 660 matches = overridematch(repo[None], pats, opts)
661 661 orig(ui, repo, *pats, **opts)
662 662 finally:
663 663 restorematchfn()
664 664 lfileslist = getattr(repo, '_lfilestoupdate', [])
665 665 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
666 666 printmessage=False)
667 667
668 668 # empty out the largefiles list so we start fresh next time
669 669 repo._lfilestoupdate = []
670 670 for lfile in modified:
671 671 if lfile in lfileslist:
672 672 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
673 673 in repo['.']:
674 674 lfutil.writestandin(repo, lfutil.standin(lfile),
675 675 repo['.'][lfile].data().strip(),
676 676 'x' in repo['.'][lfile].flags())
677 677 lfdirstate = lfutil.openlfdirstate(ui, repo)
678 678 for lfile in added:
679 679 standin = lfutil.standin(lfile)
680 680 if standin not in ctx and (standin in matches or opts.get('all')):
681 681 if lfile in lfdirstate:
682 682 lfdirstate.drop(lfile)
683 683 util.unlinkpath(repo.wjoin(standin))
684 684 lfdirstate.write()
685 685 finally:
686 686 wlock.release()
687 687
688 688 def hgupdaterepo(orig, repo, node, overwrite):
689 689 if not overwrite:
690 690 # Only call updatelfiles on the standins that have changed to save time
691 691 oldstandins = lfutil.getstandinsstate(repo)
692 692
693 693 result = orig(repo, node, overwrite)
694 694
695 695 filelist = None
696 696 if not overwrite:
697 697 newstandins = lfutil.getstandinsstate(repo)
698 698 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
699 699 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
700 700 return result
701 701
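hgupdaterepo narrows updatelfiles() to the largefiles whose standins actually changed across the update. A plausible sketch of the two helpers it relies on, as they might appear in lfutil, assuming the "state" is a list of (largefile, hash) pairs (the real implementations may differ in detail):

def getstandinsstate(repo):
    # snapshot (largefile, expected hash) for every tracked standin
    standins = []
    matcher = getstandinmatcher(repo)     # assumed lfutil helper
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins

def getlfilestoupdate(oldstandins, newstandins):
    # a pair present in only one snapshot means its hash (or the standin
    # itself) changed, so that largefile needs refreshing
    changed = set(oldstandins).symmetric_difference(set(newstandins))
    return sorted(set(f for f, sha in changed))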
702 702 def hgmerge(orig, repo, node, force=None, remind=True):
703 703 result = orig(repo, node, force, remind)
704 704 lfcommands.updatelfiles(repo.ui, repo)
705 705 return result
706 706
707 707 # When we rebase a repository with remotely changed largefiles, we need to
708 708 # take some extra care so that the largefiles are correctly updated in the
709 709 # working copy
710 710 def overridepull(orig, ui, repo, source=None, **opts):
711 711 revsprepull = len(repo)
712 712 if not source:
713 713 source = 'default'
714 714 repo.lfpullsource = source
715 715 if opts.get('rebase', False):
716 716 repo._isrebasing = True
717 717 try:
718 718 if opts.get('update'):
719 719 del opts['update']
720 720 ui.debug('--update and --rebase are not compatible, ignoring '
721 721 'the update flag\n')
722 722 del opts['rebase']
723 723 origpostincoming = commands.postincoming
724 724 def _dummy(*args, **kwargs):
725 725 pass
726 726 commands.postincoming = _dummy
727 727 try:
728 728 result = commands.pull(ui, repo, source, **opts)
729 729 finally:
730 730 commands.postincoming = origpostincoming
731 731 revspostpull = len(repo)
732 732 if revspostpull > revsprepull:
733 733 result = result or rebase.rebase(ui, repo)
734 734 finally:
735 735 repo._isrebasing = False
736 736 else:
737 737 result = orig(ui, repo, source, **opts)
738 738 revspostpull = len(repo)
739 739 lfrevs = opts.get('lfrev', [])
740 740 if opts.get('all_largefiles'):
741 741 lfrevs.append('pulled()')
742 742 if lfrevs and revspostpull > revsprepull:
743 743 numcached = 0
744 744 repo.firstpulled = revsprepull # for pulled() revset expression
745 745 try:
746 746 for rev in scmutil.revrange(repo, lfrevs):
747 747 ui.note(_('pulling largefiles for revision %s\n') % rev)
748 748 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
749 749 numcached += len(cached)
750 750 finally:
751 751 del repo.firstpulled
752 752 ui.status(_("%d largefiles cached\n") % numcached)
753 753 return result
754 754
755 755 def pulledrevsetsymbol(repo, subset, x):
756 756 """``pulled()``
757 757 Changesets that have just been pulled.
758 758
759 759 Only available with largefiles from pull --lfrev expressions.
760 760
761 761 .. container:: verbose
762 762
763 763 Some examples:
764 764
765 765 - pull largefiles for all new changesets::
766 766
767 767 hg pull --lfrev "pulled()"
768 768
769 769 - pull largefiles for all new branch heads::
770 770
771 771 hg pull --lfrev "head(pulled()) and not closed()"
772 772
773 773 """
774 774
775 775 try:
776 776 firstpulled = repo.firstpulled
777 777 except AttributeError:
778 778 raise util.Abort(_("pulled() only available in --lfrev"))
779 779 return [r for r in subset if r >= firstpulled]
780 780
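The ``pulled()`` predicate is only meaningful while overridepull has set repo.firstpulled; presumably it is registered with the revset parser during extension setup, along these lines (assumed wiring, not shown in this file):

from mercurial import revset
# let 'pulled()' parse in --lfrev expressions; in largefiles this
# registration would live in the extension's uisetup
revset.symbols['pulled'] = pulledrevsetsymbol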
781 781 def overrideclone(orig, ui, source, dest=None, **opts):
782 782 d = dest
783 783 if d is None:
784 784 d = hg.defaultdest(source)
785 785 if opts.get('all_largefiles') and not hg.islocal(d):
786 786 raise util.Abort(_(
787 787 '--all-largefiles is incompatible with non-local destination %s')
788 788 % d)
789 789
790 790 return orig(ui, source, dest, **opts)
791 791
792 792 def hgclone(orig, ui, opts, *args, **kwargs):
793 793 result = orig(ui, opts, *args, **kwargs)
794 794
795 795 if result is not None:
796 796 sourcerepo, destrepo = result
797 797 repo = destrepo.local()
798 798
799 799 # Caching is implicitly limited to the 'rev' option, since the dest repo
800 800 # was truncated at that point. The user may expect a download count with
801 801 # this option, so attempt the download whether or not this is a largefile repo.
802 802 if opts.get('all_largefiles'):
803 803 success, missing = lfcommands.downloadlfiles(ui, repo, None)
804 804
805 805 if missing != 0:
806 806 return None
807 807
808 808 return result
809 809
810 810 def overriderebase(orig, ui, repo, **opts):
811 811 repo._isrebasing = True
812 812 try:
813 813 return orig(ui, repo, **opts)
814 814 finally:
815 815 repo._isrebasing = False
816 816
817 817 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
818 818 prefix=None, mtime=None, subrepos=None):
819 819 # No need to lock because we are only reading history and
820 820 # largefile caches, neither of which are modified.
821 821 lfcommands.cachelfiles(repo.ui, repo, node)
822 822
823 823 if kind not in archival.archivers:
824 824 raise util.Abort(_("unknown archive type '%s'") % kind)
825 825
826 826 ctx = repo[node]
827 827
828 828 if kind == 'files':
829 829 if prefix:
830 830 raise util.Abort(
831 831 _('cannot give prefix when archiving to files'))
832 832 else:
833 833 prefix = archival.tidyprefix(dest, kind, prefix)
834 834
835 835 def write(name, mode, islink, getdata):
836 836 if matchfn and not matchfn(name):
837 837 return
838 838 data = getdata()
839 839 if decode:
840 840 data = repo.wwritedata(name, data)
841 841 archiver.addfile(prefix + name, mode, islink, data)
842 842
843 843 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
844 844
845 845 if repo.ui.configbool("ui", "archivemeta", True):
846 846 def metadata():
847 847 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
848 848 hex(repo.changelog.node(0)), hex(node), ctx.branch())
849 849
850 850 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
851 851 if repo.tagtype(t) == 'global')
852 852 if not tags:
853 853 repo.ui.pushbuffer()
854 854 opts = {'template': '{latesttag}\n{latesttagdistance}',
855 855 'style': '', 'patch': None, 'git': None}
856 856 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
857 857 ltags, dist = repo.ui.popbuffer().split('\n')
858 858 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
859 859 tags += 'latesttagdistance: %s\n' % dist
860 860
861 861 return base + tags
862 862
863 863 write('.hg_archival.txt', 0644, False, metadata)
864 864
865 865 for f in ctx:
866 866 ff = ctx.flags(f)
867 867 getdata = ctx[f].data
868 868 if lfutil.isstandin(f):
869 869 path = lfutil.findfile(repo, getdata().strip())
870 870 if path is None:
871 871 raise util.Abort(
872 872 _('largefile %s not found in repo store or system cache')
873 873 % lfutil.splitstandin(f))
874 874 f = lfutil.splitstandin(f)
875 875
876 876 def getdatafn():
877 877 fd = None
878 878 try:
879 879 fd = open(path, 'rb')
880 880 return fd.read()
881 881 finally:
882 882 if fd:
883 883 fd.close()
884 884
885 885 getdata = getdatafn
886 886 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
887 887
888 888 if subrepos:
889 889 for subpath in sorted(ctx.substate):
890 890 sub = ctx.sub(subpath)
891 891 submatch = match_.narrowmatcher(subpath, matchfn)
892 892 sub.archive(repo.ui, archiver, prefix, submatch)
893 893
894 894 archiver.done()
895 895
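For illustration, the '.hg_archival.txt' that metadata() emits looks roughly like this (all values hypothetical):

repo: 0123456789abcdef0123456789abcdef01234567
node: fedcba9876543210fedcba9876543210fedcba98
branch: default
latesttag: 1.9.3
latesttagdistance: 41

The 'tag:' lines appear instead of the latesttag pair when the archived changeset carries global tags.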
896 896 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
897 897 repo._get(repo._state + ('hg',))
898 898 rev = repo._state[1]
899 899 ctx = repo._repo[rev]
900 900
901 901 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
902 902
903 903 def write(name, mode, islink, getdata):
904 904 # At this point, the standin has been replaced with the largefile name,
905 905 # so the normal matcher works here without the lfutil variants.
906 906 if match and not match(name):
907 907 return
908 908 data = getdata()
909 909
910 910 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
911 911
912 912 for f in ctx:
913 913 ff = ctx.flags(f)
914 914 getdata = ctx[f].data
915 915 if lfutil.isstandin(f):
916 916 path = lfutil.findfile(repo._repo, getdata().strip())
917 917 if path is None:
918 918 raise util.Abort(
919 919 _('largefile %s not found in repo store or system cache')
920 920 % lfutil.splitstandin(f))
921 921 f = lfutil.splitstandin(f)
922 922
923 923 def getdatafn():
924 924 fd = None
925 925 try:
926 926 fd = open(os.path.join(prefix, path), 'rb')
927 927 return fd.read()
928 928 finally:
929 929 if fd:
930 930 fd.close()
931 931
932 932 getdata = getdatafn
933 933
934 934 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
935 935
936 936 for subpath in sorted(ctx.substate):
937 937 sub = ctx.sub(subpath)
938 938 submatch = match_.narrowmatcher(subpath, match)
939 939 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
940 940 submatch)
941 941
942 942 # If a largefile is modified, the change is not reflected in its
943 943 # standin until a commit. cmdutil.bailifchanged() raises an exception
944 944 # if the repo has uncommitted changes. Wrap it to also check if
945 945 # largefiles were changed. This is used by bisect and backout.
946 946 def overridebailifchanged(orig, repo):
947 947 orig(repo)
948 948 repo.lfstatus = True
949 949 modified, added, removed, deleted = repo.status()[:4]
950 950 repo.lfstatus = False
951 951 if modified or added or removed or deleted:
952 952 raise util.Abort(_('uncommitted changes'))
953 953
954 954 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
955 955 def overridefetch(orig, ui, repo, *pats, **opts):
956 956 repo.lfstatus = True
957 957 modified, added, removed, deleted = repo.status()[:4]
958 958 repo.lfstatus = False
959 959 if modified or added or removed or deleted:
960 960 raise util.Abort(_('uncommitted changes'))
961 961 return orig(ui, repo, *pats, **opts)
962 962
963 963 def overrideforget(orig, ui, repo, *pats, **opts):
964 964 installnormalfilesmatchfn(repo[None].manifest())
965 965 result = orig(ui, repo, *pats, **opts)
966 966 restorematchfn()
967 967 m = scmutil.match(repo[None], pats, opts)
968 968
969 969 try:
970 970 repo.lfstatus = True
971 971 s = repo.status(match=m, clean=True)
972 972 finally:
973 973 repo.lfstatus = False
974 974 forget = sorted(s[0] + s[1] + s[3] + s[6])
975 975 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
976 976
977 977 for f in forget:
978 978 if lfutil.standin(f) not in repo.dirstate and not \
979 979 os.path.isdir(m.rel(lfutil.standin(f))):
980 980 ui.warn(_('not removing %s: file is already untracked\n')
981 981 % m.rel(f))
982 982 result = 1
983 983
984 984 for f in forget:
985 985 if ui.verbose or not m.exact(f):
986 986 ui.status(_('removing %s\n') % m.rel(f))
987 987
988 988 # Need to lock because standin files are deleted then removed from the
989 989 # repository and we could race in-between.
990 990 wlock = repo.wlock()
991 991 try:
992 992 lfdirstate = lfutil.openlfdirstate(ui, repo)
993 993 for f in forget:
994 994 if lfdirstate[f] == 'a':
995 995 lfdirstate.drop(f)
996 996 else:
997 997 lfdirstate.remove(f)
998 998 lfdirstate.write()
999 999 standins = [lfutil.standin(f) for f in forget]
1000 1000 for f in standins:
1001 1001 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1002 1002 repo[None].forget(standins)
1003 1003 finally:
1004 1004 wlock.release()
1005 1005
1006 1006 return result
1007 1007
1008 1008 def getoutgoinglfiles(ui, repo, dest=None, **opts):
1009 1009 dest = ui.expandpath(dest or 'default-push', dest or 'default')
1010 1010 dest, branches = hg.parseurl(dest, opts.get('branch'))
1011 1011 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
1012 1012 if revs:
1013 1013 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
1014 1014
1015 1015 try:
1016 1016 remote = hg.peer(repo, opts, dest)
1017 1017 except error.RepoError:
1018 1018 return None
1019 1019 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
1020 1020 if not outgoing.missing:
1021 1021 return outgoing.missing
1022 1022 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
1023 1023 if opts.get('newest_first'):
1024 1024 o.reverse()
1025 1025
1026 1026 toupload = set()
1027 1027 for n in o:
1028 1028 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
1029 1029 ctx = repo[n]
1030 1030 files = set(ctx.files())
1031 1031 if len(parents) == 2:
1032 1032 mc = ctx.manifest()
1033 1033 mp1 = ctx.parents()[0].manifest()
1034 1034 mp2 = ctx.parents()[1].manifest()
1035 1035 for f in mp1:
1036 1036 if f not in mc:
1037 1037 files.add(f)
1038 1038 for f in mp2:
1039 1039 if f not in mc:
1040 1040 files.add(f)
1041 1041 for f in mc:
1042 1042 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
1043 1043 files.add(f)
1044 1044 toupload = toupload.union(
1045 1045 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
1046 1046 return sorted(toupload)
1047 1047
1048 1048 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1049 1049 result = orig(ui, repo, dest, **opts)
1050 1050
1051 1051 if opts.pop('large', None):
1052 1052 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1053 1053 if toupload is None:
1054 1054 ui.status(_('largefiles: no remote repo\n'))
1055 1055 elif not toupload:
1056 1056 ui.status(_('largefiles: no files to upload\n'))
1057 1057 else:
1058 1058 ui.status(_('largefiles to upload:\n'))
1059 1059 for file in toupload:
1060 1060 ui.status(lfutil.splitstandin(file) + '\n')
1061 1061 ui.status('\n')
1062 1062
1063 1063 return result
1064 1064
1065 1065 def overridesummary(orig, ui, repo, *pats, **opts):
1066 1066 try:
1067 1067 repo.lfstatus = True
1068 1068 orig(ui, repo, *pats, **opts)
1069 1069 finally:
1070 1070 repo.lfstatus = False
1071 1071
1072 1072 if opts.pop('large', None):
1073 1073 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1074 1074 if toupload is None:
1075 1075 # i18n: column positioning for "hg summary"
1076 1076 ui.status(_('largefiles: (no remote repo)\n'))
1077 1077 elif not toupload:
1078 1078 # i18n: column positioning for "hg summary"
1079 1079 ui.status(_('largefiles: (no files to upload)\n'))
1080 1080 else:
1081 1081 # i18n: column positioning for "hg summary"
1082 1082 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1083 1083
1084 1084 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1085 1085 similarity=None):
1086 1086 if not lfutil.islfilesrepo(repo):
1087 1087 return orig(repo, pats, opts, dry_run, similarity)
1088 1088 # Get the list of missing largefiles so we can remove them
1089 1089 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1090 1090 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1091 1091 False, False)
1092 1092 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1093 1093
1094 1094 # Call into the normal remove code, but leave the removal of the standins
1095 1095 # to the original addremove. Monkey patching here makes sure we don't
1096 1096 # remove the standins in the largefiles code, which would otherwise leave
1097 1097 # a very confused state later.
1098 1098 if missing:
1099 1099 m = [repo.wjoin(f) for f in missing]
1100 1100 repo._isaddremove = True
1101 1101 removelargefiles(repo.ui, repo, *m, **opts)
1102 1102 repo._isaddremove = False
1103 1103 # Call into the normal add code, and any files that *should* be added as
1104 1104 # largefiles will be
1105 1105 addlargefiles(repo.ui, repo, *pats, **opts)
1106 1106 # Now that we've handled largefiles, hand off to the original addremove
1107 1107 # function to take care of the rest. Make sure it doesn't do anything with
1108 1108 # largefiles by installing a matcher that will ignore them.
1109 1109 installnormalfilesmatchfn(repo[None].manifest())
1110 1110 result = orig(repo, pats, opts, dry_run, similarity)
1111 1111 restorematchfn()
1112 1112 return result
1113 1113
1114 1114 # Calling purge with --all will cause the largefiles to be deleted.
1115 1115 # Override repo.status to prevent this from happening.
1116 1116 def overridepurge(orig, ui, repo, *dirs, **opts):
1117 1117 # XXX large file status is buggy when used on a repo proxy.
1118 1118 # XXX this needs to be investigated.
1119 1119 repo = repo.unfiltered()
1120 1120 oldstatus = repo.status
1121 1121 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1122 1122 clean=False, unknown=False, listsubrepos=False):
1123 1123 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1124 1124 listsubrepos)
1125 1125 lfdirstate = lfutil.openlfdirstate(ui, repo)
1126 1126 modified, added, removed, deleted, unknown, ignored, clean = r
1127 1127 unknown = [f for f in unknown if lfdirstate[f] == '?']
1128 1128 ignored = [f for f in ignored if lfdirstate[f] == '?']
1129 1129 return modified, added, removed, deleted, unknown, ignored, clean
1130 1130 repo.status = overridestatus
1131 1131 orig(ui, repo, *dirs, **opts)
1132 1132 repo.status = oldstatus
1133 1133
1134 1134 def overriderollback(orig, ui, repo, **opts):
1135 1135 result = orig(ui, repo, **opts)
1136 1136 merge.update(repo, node=None, branchmerge=False, force=True,
1137 1137 partial=lfutil.isstandin)
1138 1138 wlock = repo.wlock()
1139 1139 try:
1140 1140 lfdirstate = lfutil.openlfdirstate(ui, repo)
1141 1141 lfiles = lfutil.listlfiles(repo)
1142 1142 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1143 1143 for file in lfiles:
1144 1144 if file in oldlfiles:
1145 1145 lfdirstate.normallookup(file)
1146 1146 else:
1147 1147 lfdirstate.add(file)
1148 1148 lfdirstate.write()
1149 1149 finally:
1150 1150 wlock.release()
1151 1151 return result
1152 1152
1153 1153 def overridetransplant(orig, ui, repo, *revs, **opts):
1154 1154 try:
1155 1155 oldstandins = lfutil.getstandinsstate(repo)
1156 1156 repo._istransplanting = True
1157 1157 result = orig(ui, repo, *revs, **opts)
1158 1158 newstandins = lfutil.getstandinsstate(repo)
1159 1159 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1160 1160 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1161 1161 printmessage=True)
1162 1162 finally:
1163 1163 repo._istransplanting = False
1164 1164 return result
1165 1165
1166 1166 def overridecat(orig, ui, repo, file1, *pats, **opts):
1167 1167 ctx = scmutil.revsingle(repo, opts.get('rev'))
1168 1168 err = 1
1169 1169 notbad = set()
1170 1170 m = scmutil.match(ctx, (file1,) + pats, opts)
1171 1171 origmatchfn = m.matchfn
1172 1172 def lfmatchfn(f):
1173 1173 lf = lfutil.splitstandin(f)
1174 1174 if lf is None:
1175 1175 return origmatchfn(f)
1176 1176 notbad.add(lf)
1177 1177 return origmatchfn(lf)
1178 1178 m.matchfn = lfmatchfn
1179 1179 origbadfn = m.bad
1180 1180 def lfbadfn(f, msg):
1181 1181 if f not in notbad:
1182 1182 return origbadfn(f, msg)
1183 1183 m.bad = lfbadfn
1184 1184 for f in ctx.walk(m):
1185 1185 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1186 1186 pathname=f)
1187 1187 lf = lfutil.splitstandin(f)
1188 1188 if lf is None:
1189 1189 # duplicating unreachable code from commands.cat
1190 1190 data = ctx[f].data()
1191 1191 if opts.get('decode'):
1192 1192 data = repo.wwritedata(f, data)
1193 1193 fp.write(data)
1194 1194 else:
1195 1195 hash = lfutil.readstandin(repo, lf, ctx.rev())
1196 1196 if not lfutil.inusercache(repo.ui, hash):
1197 1197 store = basestore._openstore(repo)
1198 1198 success, missing = store.get([(lf, hash)])
1199 1199 if len(success) != 1:
1200 1200 raise util.Abort(
1201 1201 _('largefile %s is not in cache and could not be '
1202 1202 'downloaded') % lf)
1203 1203 path = lfutil.usercachepath(repo.ui, hash)
1204 1204 fpin = open(path, "rb")
1205 1205 for chunk in util.filechunkiter(fpin, 128 * 1024):
1206 1206 fp.write(chunk)
1207 1207 fpin.close()
1208 1208 fp.close()
1209 1209 err = 0
1210 1210 return err
1211 1211
1212 1212 def mercurialsinkbefore(orig, sink):
1213 1213 sink.repo._isconverting = True
1214 1214 orig(sink)
1215 1215
1216 1216 def mercurialsinkafter(orig, sink):
1217 1217 sink.repo._isconverting = False
1218 1218 orig(sink)
@@ -1,2167 +1,2167 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 import subrepo, context, repair, graphmod, revset, phases, obsolete
13 import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import lock as lockmod
17 17
18 18 def parsealiases(cmd):
19 19 return cmd.lstrip("^").split("|")
20 20
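For example, given the command-table key syntax where a leading '^' marks a command listed in short help and '|' separates aliases:

>>> parsealiases('^commit|ci')
['commit', 'ci']
>>> parsealiases('log')
['log']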
21 21 def findpossible(cmd, table, strict=False):
22 22 """
23 23 Return cmd -> (aliases, command table entry)
24 24 for each matching command.
25 25 Return debug commands (or their aliases) only if no normal command matches.
26 26 """
27 27 choice = {}
28 28 debugchoice = {}
29 29
30 30 if cmd in table:
31 31 # short-circuit exact matches, "log" alias beats "^log|history"
32 32 keys = [cmd]
33 33 else:
34 34 keys = table.keys()
35 35
36 36 for e in keys:
37 37 aliases = parsealiases(e)
38 38 found = None
39 39 if cmd in aliases:
40 40 found = cmd
41 41 elif not strict:
42 42 for a in aliases:
43 43 if a.startswith(cmd):
44 44 found = a
45 45 break
46 46 if found is not None:
47 47 if aliases[0].startswith("debug") or found.startswith("debug"):
48 48 debugchoice[found] = (aliases, table[e])
49 49 else:
50 50 choice[found] = (aliases, table[e])
51 51
52 52 if not choice and debugchoice:
53 53 choice = debugchoice
54 54
55 55 return choice
56 56
57 57 def findcmd(cmd, table, strict=True):
58 58 """Return (aliases, command table entry) for command string."""
59 59 choice = findpossible(cmd, table, strict)
60 60
61 61 if cmd in choice:
62 62 return choice[cmd]
63 63
64 64 if len(choice) > 1:
65 65 clist = choice.keys()
66 66 clist.sort()
67 67 raise error.AmbiguousCommand(cmd, clist)
68 68
69 69 if choice:
70 70 return choice.values()[0]
71 71
72 72 raise error.UnknownCommand(cmd)
73 73
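A quick illustration of the matching rules (table contents hypothetical; real entries map to command-table tuples):

table = {'^commit|ci': (None,), 'config': (None,)}
findcmd('ci', table, strict=True)    # exact alias, returns the commit entry
findcmd('con', table, strict=False)  # unambiguous prefix of 'config'
findcmd('c', table, strict=False)    # raises error.AmbiguousCommand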
74 74 def findrepo(p):
75 75 while not os.path.isdir(os.path.join(p, ".hg")):
76 76 oldp, p = p, os.path.dirname(p)
77 77 if p == oldp:
78 78 return None
79 79
80 80 return p
81 81
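For instance (paths hypothetical):

# assuming /home/alice/project/.hg exists and no deeper .hg does:
findrepo('/home/alice/project/src')  # -> '/home/alice/project'
findrepo('/tmp/no-repo-here')        # -> None once the root is reached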
82 82 def bailifchanged(repo):
83 83 if repo.dirstate.p2() != nullid:
84 84 raise util.Abort(_('outstanding uncommitted merge'))
85 85 modified, added, removed, deleted = repo.status()[:4]
86 86 if modified or added or removed or deleted:
87 87 raise util.Abort(_('uncommitted changes'))
88 88 ctx = repo[None]
89 89 for s in sorted(ctx.substate):
90 90 if ctx.sub(s).dirty():
91 91 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
92 92
93 93 def logmessage(ui, opts):
94 94 """ get the log message according to -m and -l option """
95 95 message = opts.get('message')
96 96 logfile = opts.get('logfile')
97 97
98 98 if message and logfile:
99 99 raise util.Abort(_('options --message and --logfile are mutually '
100 100 'exclusive'))
101 101 if not message and logfile:
102 102 try:
103 103 if logfile == '-':
104 104 message = ui.fin.read()
105 105 else:
106 106 message = '\n'.join(util.readfile(logfile).splitlines())
107 107 except IOError, inst:
108 108 raise util.Abort(_("can't read commit message '%s': %s") %
109 109 (logfile, inst.strerror))
110 110 return message
111 111
112 112 def loglimit(opts):
113 113 """get the log limit according to option -l/--limit"""
114 114 limit = opts.get('limit')
115 115 if limit:
116 116 try:
117 117 limit = int(limit)
118 118 except ValueError:
119 119 raise util.Abort(_('limit must be a positive integer'))
120 120 if limit <= 0:
121 121 raise util.Abort(_('limit must be positive'))
122 122 else:
123 123 limit = None
124 124 return limit
125 125
126 126 def makefilename(repo, pat, node, desc=None,
127 127 total=None, seqno=None, revwidth=None, pathname=None):
128 128 node_expander = {
129 129 'H': lambda: hex(node),
130 130 'R': lambda: str(repo.changelog.rev(node)),
131 131 'h': lambda: short(node),
132 132 'm': lambda: re.sub('[^\w]', '_', str(desc))
133 133 }
134 134 expander = {
135 135 '%': lambda: '%',
136 136 'b': lambda: os.path.basename(repo.root),
137 137 }
138 138
139 139 try:
140 140 if node:
141 141 expander.update(node_expander)
142 142 if node:
143 143 expander['r'] = (lambda:
144 144 str(repo.changelog.rev(node)).zfill(revwidth or 0))
145 145 if total is not None:
146 146 expander['N'] = lambda: str(total)
147 147 if seqno is not None:
148 148 expander['n'] = lambda: str(seqno)
149 149 if total is not None and seqno is not None:
150 150 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
151 151 if pathname is not None:
152 152 expander['s'] = lambda: os.path.basename(pathname)
153 153 expander['d'] = lambda: os.path.dirname(pathname) or '.'
154 154 expander['p'] = lambda: pathname
155 155
156 156 newname = []
157 157 patlen = len(pat)
158 158 i = 0
159 159 while i < patlen:
160 160 c = pat[i]
161 161 if c == '%':
162 162 i += 1
163 163 c = pat[i]
164 164 c = expander[c]()
165 165 newname.append(c)
166 166 i += 1
167 167 return ''.join(newname)
168 168 except KeyError, inst:
169 169 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
170 170 inst.args[0])
171 171
172 172 def makefileobj(repo, pat, node=None, desc=None, total=None,
173 173 seqno=None, revwidth=None, mode='wb', modemap=None,
174 174 pathname=None):
175 175
176 176 writable = mode not in ('r', 'rb')
177 177
178 178 if not pat or pat == '-':
179 179 fp = writable and repo.ui.fout or repo.ui.fin
180 180 if util.safehasattr(fp, 'fileno'):
181 181 return os.fdopen(os.dup(fp.fileno()), mode)
182 182 else:
183 183 # if this fp can't be duped properly, return
184 184 # a dummy object that can be closed
185 185 class wrappedfileobj(object):
186 186 noop = lambda x: None
187 187 def __init__(self, f):
188 188 self.f = f
189 189 def __getattr__(self, attr):
190 190 if attr == 'close':
191 191 return self.noop
192 192 else:
193 193 return getattr(self.f, attr)
194 194
195 195 return wrappedfileobj(fp)
196 196 if util.safehasattr(pat, 'write') and writable:
197 197 return pat
198 198 if util.safehasattr(pat, 'read') and 'r' in mode:
199 199 return pat
200 200 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
201 201 if modemap is not None:
202 202 mode = modemap.get(fn, mode)
203 203 if mode == 'wb':
204 204 modemap[fn] = 'ab'
205 205 return open(fn, mode)
206 206
207 207 def openrevlog(repo, cmd, file_, opts):
208 208 """opens the changelog, manifest, a filelog or a given revlog"""
209 209 cl = opts['changelog']
210 210 mf = opts['manifest']
211 211 msg = None
212 212 if cl and mf:
213 213 msg = _('cannot specify --changelog and --manifest at the same time')
214 214 elif cl or mf:
215 215 if file_:
216 216 msg = _('cannot specify filename with --changelog or --manifest')
217 217 elif not repo:
218 218 msg = _('cannot specify --changelog or --manifest '
219 219 'without a repository')
220 220 if msg:
221 221 raise util.Abort(msg)
222 222
223 223 r = None
224 224 if repo:
225 225 if cl:
226 226 r = repo.changelog
227 227 elif mf:
228 228 r = repo.manifest
229 229 elif file_:
230 230 filelog = repo.file(file_)
231 231 if len(filelog):
232 232 r = filelog
233 233 if not r:
234 234 if not file_:
235 235 raise error.CommandError(cmd, _('invalid arguments'))
236 236 if not os.path.isfile(file_):
237 237 raise util.Abort(_("revlog '%s' not found") % file_)
238 238 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
239 239 file_[:-2] + ".i")
240 240 return r
241 241
242 242 def copy(ui, repo, pats, opts, rename=False):
243 243 # called with the repo lock held
244 244 #
245 245 # hgsep => pathname that uses "/" to separate directories
246 246 # ossep => pathname that uses os.sep to separate directories
247 247 cwd = repo.getcwd()
248 248 targets = {}
249 249 after = opts.get("after")
250 250 dryrun = opts.get("dry_run")
251 251 wctx = repo[None]
252 252
253 253 def walkpat(pat):
254 254 srcs = []
255 255 badstates = after and '?' or '?r'
256 256 m = scmutil.match(repo[None], [pat], opts, globbed=True)
257 257 for abs in repo.walk(m):
258 258 state = repo.dirstate[abs]
259 259 rel = m.rel(abs)
260 260 exact = m.exact(abs)
261 261 if state in badstates:
262 262 if exact and state == '?':
263 263 ui.warn(_('%s: not copying - file is not managed\n') % rel)
264 264 if exact and state == 'r':
265 265 ui.warn(_('%s: not copying - file has been marked for'
266 266 ' remove\n') % rel)
267 267 continue
268 268 # abs: hgsep
269 269 # rel: ossep
270 270 srcs.append((abs, rel, exact))
271 271 return srcs
272 272
273 273 # abssrc: hgsep
274 274 # relsrc: ossep
275 275 # otarget: ossep
276 276 def copyfile(abssrc, relsrc, otarget, exact):
277 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
277 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
278 278 if '/' in abstarget:
279 279 # We cannot normalize abstarget itself, this would prevent
280 280 # case only renames, like a => A.
281 281 abspath, absname = abstarget.rsplit('/', 1)
282 282 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
283 283 reltarget = repo.pathto(abstarget, cwd)
284 284 target = repo.wjoin(abstarget)
285 285 src = repo.wjoin(abssrc)
286 286 state = repo.dirstate[abstarget]
287 287
288 288 scmutil.checkportable(ui, abstarget)
289 289
290 290 # check for collisions
291 291 prevsrc = targets.get(abstarget)
292 292 if prevsrc is not None:
293 293 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
294 294 (reltarget, repo.pathto(abssrc, cwd),
295 295 repo.pathto(prevsrc, cwd)))
296 296 return
297 297
298 298 # check for overwrites
299 299 exists = os.path.lexists(target)
300 300 samefile = False
301 301 if exists and abssrc != abstarget:
302 302 if (repo.dirstate.normalize(abssrc) ==
303 303 repo.dirstate.normalize(abstarget)):
304 304 if not rename:
305 305 ui.warn(_("%s: can't copy - same file\n") % reltarget)
306 306 return
307 307 exists = False
308 308 samefile = True
309 309
310 310 if (not after and exists) or (after and state in 'mn'):
311 311 if not opts['force']:
312 312 ui.warn(_('%s: not overwriting - file exists\n') %
313 313 reltarget)
314 314 return
315 315
316 316 if after:
317 317 if not exists:
318 318 if rename:
319 319 ui.warn(_('%s: not recording move - %s does not exist\n') %
320 320 (relsrc, reltarget))
321 321 else:
322 322 ui.warn(_('%s: not recording copy - %s does not exist\n') %
323 323 (relsrc, reltarget))
324 324 return
325 325 elif not dryrun:
326 326 try:
327 327 if exists:
328 328 os.unlink(target)
329 329 targetdir = os.path.dirname(target) or '.'
330 330 if not os.path.isdir(targetdir):
331 331 os.makedirs(targetdir)
332 332 if samefile:
333 333 tmp = target + "~hgrename"
334 334 os.rename(src, tmp)
335 335 os.rename(tmp, target)
336 336 else:
337 337 util.copyfile(src, target)
338 338 srcexists = True
339 339 except IOError, inst:
340 340 if inst.errno == errno.ENOENT:
341 341 ui.warn(_('%s: deleted in working copy\n') % relsrc)
342 342 srcexists = False
343 343 else:
344 344 ui.warn(_('%s: cannot copy - %s\n') %
345 345 (relsrc, inst.strerror))
346 346 return True # report a failure
347 347
348 348 if ui.verbose or not exact:
349 349 if rename:
350 350 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
351 351 else:
352 352 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
353 353
354 354 targets[abstarget] = abssrc
355 355
356 356 # fix up dirstate
357 357 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
358 358 dryrun=dryrun, cwd=cwd)
359 359 if rename and not dryrun:
360 360 if not after and srcexists and not samefile:
361 361 util.unlinkpath(repo.wjoin(abssrc))
362 362 wctx.forget([abssrc])
363 363
364 364 # pat: ossep
365 365 # dest ossep
366 366 # srcs: list of (hgsep, hgsep, ossep, bool)
367 367 # return: function that takes hgsep and returns ossep
368 368 def targetpathfn(pat, dest, srcs):
369 369 if os.path.isdir(pat):
370 abspfx = scmutil.canonpath(repo.root, cwd, pat)
370 abspfx = pathutil.canonpath(repo.root, cwd, pat)
371 371 abspfx = util.localpath(abspfx)
372 372 if destdirexists:
373 373 striplen = len(os.path.split(abspfx)[0])
374 374 else:
375 375 striplen = len(abspfx)
376 376 if striplen:
377 377 striplen += len(os.sep)
378 378 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
379 379 elif destdirexists:
380 380 res = lambda p: os.path.join(dest,
381 381 os.path.basename(util.localpath(p)))
382 382 else:
383 383 res = lambda p: dest
384 384 return res
385 385
386 386 # pat: ossep
387 387 # dest ossep
388 388 # srcs: list of (hgsep, hgsep, ossep, bool)
389 389 # return: function that takes hgsep and returns ossep
390 390 def targetpathafterfn(pat, dest, srcs):
391 391 if matchmod.patkind(pat):
392 392 # a mercurial pattern
393 393 res = lambda p: os.path.join(dest,
394 394 os.path.basename(util.localpath(p)))
395 395 else:
396 abspfx = scmutil.canonpath(repo.root, cwd, pat)
396 abspfx = pathutil.canonpath(repo.root, cwd, pat)
397 397 if len(abspfx) < len(srcs[0][0]):
398 398 # A directory. Either the target path contains the last
399 399 # component of the source path or it does not.
400 400 def evalpath(striplen):
401 401 score = 0
402 402 for s in srcs:
403 403 t = os.path.join(dest, util.localpath(s[0])[striplen:])
404 404 if os.path.lexists(t):
405 405 score += 1
406 406 return score
407 407
408 408 abspfx = util.localpath(abspfx)
409 409 striplen = len(abspfx)
410 410 if striplen:
411 411 striplen += len(os.sep)
412 412 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
413 413 score = evalpath(striplen)
414 414 striplen1 = len(os.path.split(abspfx)[0])
415 415 if striplen1:
416 416 striplen1 += len(os.sep)
417 417 if evalpath(striplen1) > score:
418 418 striplen = striplen1
419 419 res = lambda p: os.path.join(dest,
420 420 util.localpath(p)[striplen:])
421 421 else:
422 422 # a file
423 423 if destdirexists:
424 424 res = lambda p: os.path.join(dest,
425 425 os.path.basename(util.localpath(p)))
426 426 else:
427 427 res = lambda p: dest
428 428 return res
429 429
430 430
431 431 pats = scmutil.expandpats(pats)
432 432 if not pats:
433 433 raise util.Abort(_('no source or destination specified'))
434 434 if len(pats) == 1:
435 435 raise util.Abort(_('no destination specified'))
436 436 dest = pats.pop()
437 437 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
438 438 if not destdirexists:
439 439 if len(pats) > 1 or matchmod.patkind(pats[0]):
440 440 raise util.Abort(_('with multiple sources, destination must be an '
441 441 'existing directory'))
442 442 if util.endswithsep(dest):
443 443 raise util.Abort(_('destination %s is not a directory') % dest)
444 444
445 445 tfn = targetpathfn
446 446 if after:
447 447 tfn = targetpathafterfn
448 448 copylist = []
449 449 for pat in pats:
450 450 srcs = walkpat(pat)
451 451 if not srcs:
452 452 continue
453 453 copylist.append((tfn(pat, dest, srcs), srcs))
454 454 if not copylist:
455 455 raise util.Abort(_('no files to copy'))
456 456
457 457 errors = 0
458 458 for targetpath, srcs in copylist:
459 459 for abssrc, relsrc, exact in srcs:
460 460 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
461 461 errors += 1
462 462
463 463 if errors:
464 464 ui.warn(_('(consider using --after)\n'))
465 465
466 466 return errors != 0
467 467
468 468 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
469 469 runargs=None, appendpid=False):
470 470 '''Run a command as a service.'''
471 471
472 472 def writepid(pid):
473 473 if opts['pid_file']:
474 474 mode = appendpid and 'a' or 'w'
475 475 fp = open(opts['pid_file'], mode)
476 476 fp.write(str(pid) + '\n')
477 477 fp.close()
478 478
479 479 if opts['daemon'] and not opts['daemon_pipefds']:
480 480 # Signal child process startup with file removal
481 481 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
482 482 os.close(lockfd)
483 483 try:
484 484 if not runargs:
485 485 runargs = util.hgcmd() + sys.argv[1:]
486 486 runargs.append('--daemon-pipefds=%s' % lockpath)
487 487 # Don't pass --cwd to the child process, because we've already
488 488 # changed directory.
489 489 for i in xrange(1, len(runargs)):
490 490 if runargs[i].startswith('--cwd='):
491 491 del runargs[i]
492 492 break
493 493 elif runargs[i].startswith('--cwd'):
494 494 del runargs[i:i + 2]
495 495 break
496 496 def condfn():
497 497 return not os.path.exists(lockpath)
498 498 pid = util.rundetached(runargs, condfn)
499 499 if pid < 0:
500 500 raise util.Abort(_('child process failed to start'))
501 501 writepid(pid)
502 502 finally:
503 503 try:
504 504 os.unlink(lockpath)
505 505 except OSError, e:
506 506 if e.errno != errno.ENOENT:
507 507 raise
508 508 if parentfn:
509 509 return parentfn(pid)
510 510 else:
511 511 return
512 512
513 513 if initfn:
514 514 initfn()
515 515
516 516 if not opts['daemon']:
517 517 writepid(os.getpid())
518 518
519 519 if opts['daemon_pipefds']:
520 520 lockpath = opts['daemon_pipefds']
521 521 try:
522 522 os.setsid()
523 523 except AttributeError:
524 524 pass
525 525 os.unlink(lockpath)
526 526 util.hidewindow()
527 527 sys.stdout.flush()
528 528 sys.stderr.flush()
529 529
530 530 nullfd = os.open(os.devnull, os.O_RDWR)
531 531 logfilefd = nullfd
532 532 if logfile:
533 533 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
534 534 os.dup2(nullfd, 0)
535 535 os.dup2(logfilefd, 1)
536 536 os.dup2(logfilefd, 2)
537 537 if nullfd not in (0, 1, 2):
538 538 os.close(nullfd)
539 539 if logfile and logfilefd not in (0, 1, 2):
540 540 os.close(logfilefd)
541 541
542 542 if runfn:
543 543 return runfn()
544 544
545 545 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
546 546 opts=None):
547 547 '''export changesets as hg patches.'''
548 548
549 549 total = len(revs)
550 550 revwidth = max([len(str(rev)) for rev in revs])
551 551 filemode = {}
552 552
553 553 def single(rev, seqno, fp):
554 554 ctx = repo[rev]
555 555 node = ctx.node()
556 556 parents = [p.node() for p in ctx.parents() if p]
557 557 branch = ctx.branch()
558 558 if switch_parent:
559 559 parents.reverse()
560 560 prev = (parents and parents[0]) or nullid
561 561
562 562 shouldclose = False
563 563 if not fp and len(template) > 0:
564 564 desc_lines = ctx.description().rstrip().split('\n')
564 564 desc = desc_lines[0] # Commit always has a first line.
566 566 fp = makefileobj(repo, template, node, desc=desc, total=total,
567 567 seqno=seqno, revwidth=revwidth, mode='wb',
568 568 modemap=filemode)
569 569 if fp != template:
570 570 shouldclose = True
571 571 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
572 572 repo.ui.note("%s\n" % fp.name)
573 573
574 574 if not fp:
575 575 write = repo.ui.write
576 576 else:
577 577 def write(s, **kw):
578 578 fp.write(s)
579 579
580 580
581 581 write("# HG changeset patch\n")
582 582 write("# User %s\n" % ctx.user())
583 583 write("# Date %d %d\n" % ctx.date())
584 584 write("# %s\n" % util.datestr(ctx.date()))
585 585 if branch and branch != 'default':
586 586 write("# Branch %s\n" % branch)
587 587 write("# Node ID %s\n" % hex(node))
588 588 write("# Parent %s\n" % hex(prev))
589 589 if len(parents) > 1:
590 590 write("# Parent %s\n" % hex(parents[1]))
591 591 write(ctx.description().rstrip())
592 592 write("\n\n")
593 593
594 594 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
595 595 write(chunk, label=label)
596 596
597 597 if shouldclose:
598 598 fp.close()
599 599
600 600 for seqno, rev in enumerate(revs):
601 601 single(rev, seqno + 1, fp)
602 602
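The header single() writes comes out like this (all values hypothetical):

# HG changeset patch
# User Alice <alice@example.com>
# Date 1382000000 0
# Thu Oct 17 08:53:20 2013 +0000
# Branch stable
# Node ID 0123456789abcdef0123456789abcdef01234567
# Parent fedcba9876543210fedcba9876543210fedcba98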
603 603 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
604 604 changes=None, stat=False, fp=None, prefix='',
605 605 listsubrepos=False):
606 606 '''show diff or diffstat.'''
607 607 if fp is None:
608 608 write = ui.write
609 609 else:
610 610 def write(s, **kw):
611 611 fp.write(s)
612 612
613 613 if stat:
614 614 diffopts = diffopts.copy(context=0)
615 615 width = 80
616 616 if not ui.plain():
617 617 width = ui.termwidth()
618 618 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
619 619 prefix=prefix)
620 620 for chunk, label in patch.diffstatui(util.iterlines(chunks),
621 621 width=width,
622 622 git=diffopts.git):
623 623 write(chunk, label=label)
624 624 else:
625 625 for chunk, label in patch.diffui(repo, node1, node2, match,
626 626 changes, diffopts, prefix=prefix):
627 627 write(chunk, label=label)
628 628
629 629 if listsubrepos:
630 630 ctx1 = repo[node1]
631 631 ctx2 = repo[node2]
632 632 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
633 633 tempnode2 = node2
634 634 try:
635 635 if node2 is not None:
636 636 tempnode2 = ctx2.substate[subpath][1]
637 637 except KeyError:
638 638 # A subrepo that existed in node1 was deleted between node1 and
639 639 # node2 (inclusive). Thus, ctx2's substate won't contain that
640 640 # subpath. The best we can do is to ignore it.
641 641 tempnode2 = None
642 642 submatch = matchmod.narrowmatcher(subpath, match)
643 643 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
644 644 stat=stat, fp=fp, prefix=prefix)
645 645
646 646 class changeset_printer(object):
647 647 '''show changeset information when templating not requested.'''
648 648
649 649 def __init__(self, ui, repo, patch, diffopts, buffered):
650 650 self.ui = ui
651 651 self.repo = repo
652 652 self.buffered = buffered
653 653 self.patch = patch
654 654 self.diffopts = diffopts
655 655 self.header = {}
656 656 self.hunk = {}
657 657 self.lastheader = None
658 658 self.footer = None
659 659
660 660 def flush(self, rev):
661 661 if rev in self.header:
662 662 h = self.header[rev]
663 663 if h != self.lastheader:
664 664 self.lastheader = h
665 665 self.ui.write(h)
666 666 del self.header[rev]
667 667 if rev in self.hunk:
668 668 self.ui.write(self.hunk[rev])
669 669 del self.hunk[rev]
670 670 return 1
671 671 return 0
672 672
673 673 def close(self):
674 674 if self.footer:
675 675 self.ui.write(self.footer)
676 676
677 677 def show(self, ctx, copies=None, matchfn=None, **props):
678 678 if self.buffered:
679 679 self.ui.pushbuffer()
680 680 self._show(ctx, copies, matchfn, props)
681 681 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
682 682 else:
683 683 self._show(ctx, copies, matchfn, props)
684 684
685 685 def _show(self, ctx, copies, matchfn, props):
686 686 '''show a single changeset or file revision'''
687 687 changenode = ctx.node()
688 688 rev = ctx.rev()
689 689
690 690 if self.ui.quiet:
691 691 self.ui.write("%d:%s\n" % (rev, short(changenode)),
692 692 label='log.node')
693 693 return
694 694
695 695 log = self.repo.changelog
696 696 date = util.datestr(ctx.date())
697 697
698 698 hexfunc = self.ui.debugflag and hex or short
699 699
700 700 parents = [(p, hexfunc(log.node(p)))
701 701 for p in self._meaningful_parentrevs(log, rev)]
702 702
703 703 # i18n: column positioning for "hg log"
704 704 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
705 705 label='log.changeset changeset.%s' % ctx.phasestr())
706 706
707 707 branch = ctx.branch()
708 708 # don't show the default branch name
709 709 if branch != 'default':
710 710 # i18n: column positioning for "hg log"
711 711 self.ui.write(_("branch: %s\n") % branch,
712 712 label='log.branch')
713 713 for bookmark in self.repo.nodebookmarks(changenode):
714 714 # i18n: column positioning for "hg log"
715 715 self.ui.write(_("bookmark: %s\n") % bookmark,
716 716 label='log.bookmark')
717 717 for tag in self.repo.nodetags(changenode):
718 718 # i18n: column positioning for "hg log"
719 719 self.ui.write(_("tag: %s\n") % tag,
720 720 label='log.tag')
721 721 if self.ui.debugflag and ctx.phase():
722 722 # i18n: column positioning for "hg log"
723 723 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
724 724 label='log.phase')
725 725 for parent in parents:
726 726 # i18n: column positioning for "hg log"
727 727 self.ui.write(_("parent: %d:%s\n") % parent,
728 728 label='log.parent changeset.%s' % ctx.phasestr())
729 729
730 730 if self.ui.debugflag:
731 731 mnode = ctx.manifestnode()
732 732 # i18n: column positioning for "hg log"
733 733 self.ui.write(_("manifest: %d:%s\n") %
734 734 (self.repo.manifest.rev(mnode), hex(mnode)),
735 735 label='ui.debug log.manifest')
736 736 # i18n: column positioning for "hg log"
737 737 self.ui.write(_("user: %s\n") % ctx.user(),
738 738 label='log.user')
739 739 # i18n: column positioning for "hg log"
740 740 self.ui.write(_("date: %s\n") % date,
741 741 label='log.date')
742 742
743 743 if self.ui.debugflag:
744 744 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
745 745 for key, value in zip([# i18n: column positioning for "hg log"
746 746 _("files:"),
747 747 # i18n: column positioning for "hg log"
748 748 _("files+:"),
749 749 # i18n: column positioning for "hg log"
750 750 _("files-:")], files):
751 751 if value:
752 752 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
753 753 label='ui.debug log.files')
754 754 elif ctx.files() and self.ui.verbose:
755 755 # i18n: column positioning for "hg log"
756 756 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
757 757 label='ui.note log.files')
758 758 if copies and self.ui.verbose:
759 759 copies = ['%s (%s)' % c for c in copies]
760 760 # i18n: column positioning for "hg log"
761 761 self.ui.write(_("copies: %s\n") % ' '.join(copies),
762 762 label='ui.note log.copies')
763 763
764 764 extra = ctx.extra()
765 765 if extra and self.ui.debugflag:
766 766 for key, value in sorted(extra.items()):
767 767 # i18n: column positioning for "hg log"
768 768 self.ui.write(_("extra: %s=%s\n")
769 769 % (key, value.encode('string_escape')),
770 770 label='ui.debug log.extra')
771 771
772 772 description = ctx.description().strip()
773 773 if description:
774 774 if self.ui.verbose:
775 775 self.ui.write(_("description:\n"),
776 776 label='ui.note log.description')
777 777 self.ui.write(description,
778 778 label='ui.note log.description')
779 779 self.ui.write("\n\n")
780 780 else:
781 781 # i18n: column positioning for "hg log"
782 782 self.ui.write(_("summary: %s\n") %
783 783 description.splitlines()[0],
784 784 label='log.summary')
785 785 self.ui.write("\n")
786 786
787 787 self.showpatch(changenode, matchfn)
788 788
789 789 def showpatch(self, node, matchfn):
790 790 if not matchfn:
791 791 matchfn = self.patch
792 792 if matchfn:
793 793 stat = self.diffopts.get('stat')
794 794 diff = self.diffopts.get('patch')
795 795 diffopts = patch.diffopts(self.ui, self.diffopts)
796 796 prev = self.repo.changelog.parents(node)[0]
797 797 if stat:
798 798 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
799 799 match=matchfn, stat=True)
800 800 if diff:
801 801 if stat:
802 802 self.ui.write("\n")
803 803 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
804 804 match=matchfn, stat=False)
805 805 self.ui.write("\n")
806 806
807 807 def _meaningful_parentrevs(self, log, rev):
808 808 """Return list of meaningful (or all if debug) parentrevs for rev.
809 809
810 810 For merges (two non-nullrev revisions) both parents are meaningful.
811 811 Otherwise the first parent revision is considered meaningful if it
812 812 is not the preceding revision.
813 813 """
814 814 parents = log.parentrevs(rev)
815 815 if not self.ui.debugflag and parents[1] == nullrev:
816 816 if parents[0] >= rev - 1:
817 817 parents = []
818 818 else:
819 819 parents = [parents[0]]
820 820 return parents
821 821
822 822
823 823 class changeset_templater(changeset_printer):
824 824 '''format changeset information.'''
825 825
826 826 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
827 827 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
828 828 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
829 829 defaulttempl = {
830 830 'parent': '{rev}:{node|formatnode} ',
831 831 'manifest': '{rev}:{node|formatnode}',
832 832 'file_copy': '{name} ({source})',
833 833 'extra': '{key}={value|stringescape}'
834 834 }
835 835 # filecopy is preserved for compatibility reasons
836 836 defaulttempl['filecopy'] = defaulttempl['file_copy']
837 837 self.t = templater.templater(mapfile, {'formatnode': formatnode},
838 838 cache=defaulttempl)
839 839 self.cache = {}
840 840
841 841 def use_template(self, t):
842 842 '''set template string to use'''
843 843 self.t.cache['changeset'] = t
844 844
845 845 def _meaningful_parentrevs(self, ctx):
846 846 """Return list of meaningful (or all if debug) parentrevs for rev.
847 847 """
848 848 parents = ctx.parents()
849 849 if len(parents) > 1:
850 850 return parents
851 851 if self.ui.debugflag:
852 852 return [parents[0], self.repo['null']]
853 853 if parents[0].rev() >= ctx.rev() - 1:
854 854 return []
855 855 return parents
856 856
857 857 def _show(self, ctx, copies, matchfn, props):
858 858 '''show a single changeset or file revision'''
859 859
860 860 showlist = templatekw.showlist
861 861
862 862 # showparents() behaviour depends on ui trace level which
863 863 # causes unexpected behaviours at templating level and makes
864 864 # it harder to extract into a standalone function. Its
865 865 # behaviour cannot be changed so leave it here for now.
866 866 def showparents(**args):
867 867 ctx = args['ctx']
868 868 parents = [[('rev', p.rev()), ('node', p.hex())]
869 869 for p in self._meaningful_parentrevs(ctx)]
870 870 return showlist('parent', parents, **args)
871 871
872 872 props = props.copy()
873 873 props.update(templatekw.keywords)
874 874 props['parents'] = showparents
875 875 props['templ'] = self.t
876 876 props['ctx'] = ctx
877 877 props['repo'] = self.repo
878 878 props['revcache'] = {'copies': copies}
879 879 props['cache'] = self.cache
880 880
881 881 # find correct templates for current mode
882 882
883 883 tmplmodes = [
884 884 (True, None),
885 885 (self.ui.verbose, 'verbose'),
886 886 (self.ui.quiet, 'quiet'),
887 887 (self.ui.debugflag, 'debug'),
888 888 ]
889 889
890 890 types = {'header': '', 'footer': '', 'changeset': 'changeset'}
891 891 for mode, postfix in tmplmodes:
892 892 for type in types:
893 893 cur = postfix and ('%s_%s' % (type, postfix)) or type
894 894 if mode and cur in self.t:
895 895 types[type] = cur
896 896
897 897 try:
898 898
899 899 # write header
900 900 if types['header']:
901 901 h = templater.stringify(self.t(types['header'], **props))
902 902 if self.buffered:
903 903 self.header[ctx.rev()] = h
904 904 else:
905 905 if self.lastheader != h:
906 906 self.lastheader = h
907 907 self.ui.write(h)
908 908
909 909 # write changeset metadata, then patch if requested
910 910 key = types['changeset']
911 911 self.ui.write(templater.stringify(self.t(key, **props)))
912 912 self.showpatch(ctx.node(), matchfn)
913 913
914 914 if types['footer']:
915 915 if not self.footer:
916 916 self.footer = templater.stringify(self.t(types['footer'],
917 917 **props))
918 918
919 919 except KeyError, inst:
920 920 msg = _("%s: no key named '%s'")
921 921 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
922 922 except SyntaxError, inst:
923 923 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
924 924
925 925 def show_changeset(ui, repo, opts, buffered=False):
926 926 """show one changeset using template or regular display.
927 927
928 928 Display format will be the first non-empty hit of:
929 929 1. option 'template'
930 930 2. option 'style'
931 931 3. [ui] setting 'logtemplate'
932 932 4. [ui] setting 'style'
933 933 If all of these values are either unset or the empty string,
934 934 regular display via changeset_printer() is done.
935 935 """
936 936 # options
937 937 patch = None
938 938 if opts.get('patch') or opts.get('stat'):
939 939 patch = scmutil.matchall(repo)
940 940
941 941 tmpl = opts.get('template')
942 942 style = None
943 943 if tmpl:
944 944 tmpl = templater.parsestring(tmpl, quoted=False)
945 945 else:
946 946 style = opts.get('style')
947 947
948 948 # ui settings
949 949 if not (tmpl or style):
950 950 tmpl = ui.config('ui', 'logtemplate')
951 951 if tmpl:
952 952 try:
953 953 tmpl = templater.parsestring(tmpl)
954 954 except SyntaxError:
955 955 tmpl = templater.parsestring(tmpl, quoted=False)
956 956 else:
957 957 style = util.expandpath(ui.config('ui', 'style', ''))
958 958
959 959 if not (tmpl or style):
960 960 return changeset_printer(ui, repo, patch, opts, buffered)
961 961
962 962 mapfile = None
963 963 if style and not tmpl:
964 964 mapfile = style
965 965 if not os.path.split(mapfile)[0]:
966 966 mapname = (templater.templatepath('map-cmdline.' + mapfile)
967 967 or templater.templatepath(mapfile))
968 968 if mapname:
969 969 mapfile = mapname
970 970
971 971 try:
972 972 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
973 973 except SyntaxError, inst:
974 974 raise util.Abort(inst.args[0])
975 975 if tmpl:
976 976 t.use_template(tmpl)
977 977 return t
978 978
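Fallbacks 3 and 4 of the precedence list come from configuration, e.g. an hgrc like this (values illustrative):

[ui]
logtemplate = {rev}:{node|short} {desc|firstline}\n
style = compact

With both set, 'logtemplate' wins, per the order documented in the docstring above.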
979 979 def finddate(ui, repo, date):
980 980 """Find the tipmost changeset that matches the given date spec"""
981 981
982 982 df = util.matchdate(date)
983 983 m = scmutil.matchall(repo)
984 984 results = {}
985 985
986 986 def prep(ctx, fns):
987 987 d = ctx.date()
988 988 if df(d[0]):
989 989 results[ctx.rev()] = d
990 990
991 991 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
992 992 rev = ctx.rev()
993 993 if rev in results:
994 994 ui.status(_("found revision %s from %s\n") %
995 995 (rev, util.datestr(results[rev])))
996 996 return str(rev)
997 997
998 998 raise util.Abort(_("revision matching date not found"))
999 999
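# Illustrative date specs accepted via util.matchdate() (values are
# hypothetical):
#
#     finddate(ui, repo, '2013-10-29')                # tipmost rev on that day
#     finddate(ui, repo, '>2013-01-01')               # tipmost rev after Jan 1
#     finddate(ui, repo, '2013-01-01 to 2013-06-30')  # tipmost rev in range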
1000 1000 def increasingwindows(start, end, windowsize=8, sizelimit=512):
1001 1001 if start < end:
1002 1002 while start < end:
1003 1003 yield start, min(windowsize, end - start)
1004 1004 start += windowsize
1005 1005 if windowsize < sizelimit:
1006 1006 windowsize *= 2
1007 1007 else:
1008 1008 while start > end:
1009 1009 yield start, min(windowsize, start - end - 1)
1010 1010 start -= windowsize
1011 1011 if windowsize < sizelimit:
1012 1012 windowsize *= 2
1013 1013
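# A minimal sketch of the windowing above (illustrative values): windows
# double until sizelimit, so the first results arrive cheaply while later
# batches amortize the walk.
#
#     >>> list(increasingwindows(0, 20, windowsize=4, sizelimit=16))
#     [(0, 4), (4, 8), (12, 8)]
#
# Each (start, length) pair covers the next chunk of the revision range.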
1014 1014 class FileWalkError(Exception):
1015 1015 pass
1016 1016
1017 1017 def walkfilerevs(repo, match, follow, revs, fncache):
1018 1018 '''Walks the file history for the matched files.
1019 1019
1020 1020 Returns the changeset revs that are involved in the file history.
1021 1021
1022 1022 Throws FileWalkError if the file history can't be walked using
1023 1023 filelogs alone.
1024 1024 '''
1025 1025 wanted = set()
1026 1026 copies = []
1027 1027 minrev, maxrev = min(revs), max(revs)
1028 1028 def filerevgen(filelog, last):
1029 1029 """
1030 1030 Only files, no patterns. Check the history of each file.
1031 1031
1032 1032 Examines filelog entries within the minrev..maxrev linkrev range.
1033 1033 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1034 1034 tuples in backwards order.
1035 1035 """
1036 1036 cl_count = len(repo)
1037 1037 revs = []
1038 1038 for j in xrange(0, last + 1):
1039 1039 linkrev = filelog.linkrev(j)
1040 1040 if linkrev < minrev:
1041 1041 continue
1042 1042 # only yield rev for which we have the changelog, it can
1043 1043 # happen while doing "hg log" during a pull or commit
1044 1044 if linkrev >= cl_count:
1045 1045 break
1046 1046
1047 1047 parentlinkrevs = []
1048 1048 for p in filelog.parentrevs(j):
1049 1049 if p != nullrev:
1050 1050 parentlinkrevs.append(filelog.linkrev(p))
1051 1051 n = filelog.node(j)
1052 1052 revs.append((linkrev, parentlinkrevs,
1053 1053 follow and filelog.renamed(n)))
1054 1054
1055 1055 return reversed(revs)
1056 1056 def iterfiles():
1057 1057 pctx = repo['.']
1058 1058 for filename in match.files():
1059 1059 if follow:
1060 1060 if filename not in pctx:
1061 1061 raise util.Abort(_('cannot follow file not in parent '
1062 1062 'revision: "%s"') % filename)
1063 1063 yield filename, pctx[filename].filenode()
1064 1064 else:
1065 1065 yield filename, None
1066 1066 for filename_node in copies:
1067 1067 yield filename_node
1068 1068
1069 1069 for file_, node in iterfiles():
1070 1070 filelog = repo.file(file_)
1071 1071 if not len(filelog):
1072 1072 if node is None:
1073 1073 # A zero count may be a directory or deleted file, so
1074 1074 # try to find matching entries on the slow path.
1075 1075 if follow:
1076 1076 raise util.Abort(
1077 1077 _('cannot follow nonexistent file: "%s"') % file_)
1078 1078 raise FileWalkError("Cannot walk via filelog")
1079 1079 else:
1080 1080 continue
1081 1081
1082 1082 if node is None:
1083 1083 last = len(filelog) - 1
1084 1084 else:
1085 1085 last = filelog.rev(node)
1086 1086
1088 1088 # keep track of all ancestors of the file
1089 1089 ancestors = set([filelog.linkrev(last)])
1090 1090
1091 1091 # iterate from latest to oldest revision
1092 1092 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1093 1093 if not follow:
1094 1094 if rev > maxrev:
1095 1095 continue
1096 1096 else:
1097 1097 # Note that last might not be the first interesting
1098 1098 # rev to us:
1099 1099 # if the file has been changed after maxrev, we'll
1100 1100 # have linkrev(last) > maxrev, and we still need
1101 1101 # to explore the file graph
1102 1102 if rev not in ancestors:
1103 1103 continue
1104 1104 # XXX insert 1327 fix here
1105 1105 if flparentlinkrevs:
1106 1106 ancestors.update(flparentlinkrevs)
1107 1107
1108 1108 fncache.setdefault(rev, []).append(file_)
1109 1109 wanted.add(rev)
1110 1110 if copied:
1111 1111 copies.append(copied)
1112 1112
1113 1113 return wanted
1114 1114
1115 1115 def walkchangerevs(repo, match, opts, prepare):
1116 1116 '''Iterate over files and the revs in which they changed.
1117 1117
1118 1118 Callers most commonly need to iterate backwards over the history
1119 1119 in which they are interested. Doing so has awful (quadratic-looking)
1120 1120 performance, so we use iterators in a "windowed" way.
1121 1121
1122 1122 We walk a window of revisions in the desired order. Within the
1123 1123 window, we first walk forwards to gather data, then in the desired
1124 1124 order (usually backwards) to display it.
1125 1125
1126 1126 This function returns an iterator yielding contexts. Before
1127 1127 yielding each context, the iterator will first call the prepare
1128 1128 function on each context in the window in forward order.'''
1129 1129
1130 1130 follow = opts.get('follow') or opts.get('follow_first')
1131 1131
1132 1132 if opts.get('rev'):
1133 1133 revs = scmutil.revrange(repo, opts.get('rev'))
1134 1134 elif follow:
1135 1135 revs = repo.revs('reverse(:.)')
1136 1136 else:
1137 1137 revs = list(repo)
1138 1138 revs.reverse()
1139 1139 if not revs:
1140 1140 return []
1141 1141 wanted = set()
1142 1142 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1143 1143 fncache = {}
1144 1144 change = repo.changectx
1145 1145
1146 1146 # First step is to fill wanted, the set of revisions that we want to yield.
1147 1147 # When it does not induce extra cost, we also fill fncache for revisions in
1148 1148 # wanted: a cache of filenames that were changed (ctx.files()) and that
1149 1149 # match the file filtering conditions.
1150 1150
1151 1151 if not slowpath and not match.files():
1152 1152 # No files, no patterns. Display all revs.
1153 1153 wanted = set(revs)
1154 1154
1155 1155 if not slowpath and match.files():
1156 1156 # We only have to read through the filelog to find wanted revisions
1157 1157
1158 1158 try:
1159 1159 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1160 1160 except FileWalkError:
1161 1161 slowpath = True
1162 1162
1163 1163 # We decided to fall back to the slowpath because at least one
1164 1164 # of the paths was not a file. Check to see if at least one of them
1165 1165 # existed in history; otherwise, simply return
1166 1166 for path in match.files():
1167 1167 if path == '.' or path in repo.store:
1168 1168 break
1169 1169 else:
1170 1170 return []
1171 1171
1172 1172 if slowpath:
1173 1173 # We have to read the changelog to match filenames against
1174 1174 # changed files
1175 1175
1176 1176 if follow:
1177 1177 raise util.Abort(_('can only follow copies/renames for explicit '
1178 1178 'filenames'))
1179 1179
1180 1180 # The slow path checks files modified in every changeset.
1181 1181 # This is really slow on large repos, so compute the set lazily.
1182 1182 class lazywantedset(object):
1183 1183 def __init__(self):
1184 1184 self.set = set()
1185 1185 self.revs = set(revs)
1186 1186
1187 1187 # No need to worry about locality here because it will be accessed
1188 1188 # in the same order as the increasing window below.
1189 1189 def __contains__(self, value):
1190 1190 if value in self.set:
1191 1191 return True
1192 1192 elif value not in self.revs:
1193 1193 return False
1194 1194 else:
1195 1195 self.revs.discard(value)
1196 1196 ctx = change(value)
1197 1197 matches = filter(match, ctx.files())
1198 1198 if matches:
1199 1199 fncache[value] = matches
1200 1200 self.set.add(value)
1201 1201 return True
1202 1202 return False
1203 1203
1204 1204 def discard(self, value):
1205 1205 self.revs.discard(value)
1206 1206 self.set.discard(value)
1207 1207
1208 1208 wanted = lazywantedset()
1209 1209
1210 1210 class followfilter(object):
1211 1211 def __init__(self, onlyfirst=False):
1212 1212 self.startrev = nullrev
1213 1213 self.roots = set()
1214 1214 self.onlyfirst = onlyfirst
1215 1215
1216 1216 def match(self, rev):
1217 1217 def realparents(rev):
1218 1218 if self.onlyfirst:
1219 1219 return repo.changelog.parentrevs(rev)[0:1]
1220 1220 else:
1221 1221 return filter(lambda x: x != nullrev,
1222 1222 repo.changelog.parentrevs(rev))
1223 1223
1224 1224 if self.startrev == nullrev:
1225 1225 self.startrev = rev
1226 1226 return True
1227 1227
1228 1228 if rev > self.startrev:
1229 1229 # forward: all descendants
1230 1230 if not self.roots:
1231 1231 self.roots.add(self.startrev)
1232 1232 for parent in realparents(rev):
1233 1233 if parent in self.roots:
1234 1234 self.roots.add(rev)
1235 1235 return True
1236 1236 else:
1237 1237 # backwards: all parents
1238 1238 if not self.roots:
1239 1239 self.roots.update(realparents(self.startrev))
1240 1240 if rev in self.roots:
1241 1241 self.roots.remove(rev)
1242 1242 self.roots.update(realparents(rev))
1243 1243 return True
1244 1244
1245 1245 return False
1246 1246
1247 1247 # it might be worthwhile to do this in the iterator if the rev range
1248 1248 # is descending and the prune args are all within that range
1249 1249 for rev in opts.get('prune', ()):
1250 1250 rev = repo[rev].rev()
1251 1251 ff = followfilter()
1252 1252 stop = min(revs[0], revs[-1])
1253 1253 for x in xrange(rev, stop - 1, -1):
1254 1254 if ff.match(x):
1255 1255 wanted.discard(x)
1256 1256
1257 1257 # Choose a small initial window if we will probably only visit a
1258 1258 # few commits.
1259 1259 limit = loglimit(opts)
1260 1260 windowsize = 8
1261 1261 if limit:
1262 1262 windowsize = min(limit, windowsize)
1263 1263
1264 1264 # Now that wanted is correctly initialized, we can iterate over the
1265 1265 # revision range, yielding only revisions in wanted.
1266 1266 def iterate():
1267 1267 if follow and not match.files():
1268 1268 ff = followfilter(onlyfirst=opts.get('follow_first'))
1269 1269 def want(rev):
1270 1270 return ff.match(rev) and rev in wanted
1271 1271 else:
1272 1272 def want(rev):
1273 1273 return rev in wanted
1274 1274
1275 1275 for i, window in increasingwindows(0, len(revs), windowsize):
1276 1276 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1277 1277 for rev in sorted(nrevs):
1278 1278 fns = fncache.get(rev)
1279 1279 ctx = change(rev)
1280 1280 if not fns:
1281 1281 def fns_generator():
1282 1282 for f in ctx.files():
1283 1283 if match(f):
1284 1284 yield f
1285 1285 fns = fns_generator()
1286 1286 prepare(ctx, fns)
1287 1287 for rev in nrevs:
1288 1288 yield change(rev)
1289 1289 return iterate()
1290 1290
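# A minimal caller sketch for walkchangerevs(), mirroring the prepare
# callback pattern used by finddate() above (names are illustrative):
#
#     def prep(ctx, fns):
#         pass  # gather per-window data; called in forward order
#
#     for ctx in walkchangerevs(repo, scmutil.matchall(repo),
#                               {'rev': None}, prep):
#         ui.status("%d\n" % ctx.rev())  # yielded in the requested order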
1291 1291 def _makegraphfilematcher(repo, pats, followfirst):
1292 1292 # When displaying a revision with --patch --follow FILE, we have
1293 1293 # to know which file of the revision must be diffed. With
1294 1294 # --follow, we want the names of the ancestors of FILE in the
1295 1295 # revision, stored in "fcache". "fcache" is populated by
1296 1296 # reproducing the graph traversal already done by --follow revset
1297 1297 # and relating linkrevs to file names (which is not "correct" but
1298 1298 # good enough).
1299 1299 fcache = {}
1300 1300 fcacheready = [False]
1301 1301 pctx = repo['.']
1302 1302 wctx = repo[None]
1303 1303
1304 1304 def populate():
1305 1305 for fn in pats:
1306 1306 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1307 1307 for c in i:
1308 1308 fcache.setdefault(c.linkrev(), set()).add(c.path())
1309 1309
1310 1310 def filematcher(rev):
1311 1311 if not fcacheready[0]:
1312 1312 # Lazy initialization
1313 1313 fcacheready[0] = True
1314 1314 populate()
1315 1315 return scmutil.match(wctx, fcache.get(rev, []), default='path')
1316 1316
1317 1317 return filematcher
1318 1318
1319 1319 def _makegraphlogrevset(repo, pats, opts, revs):
1320 1320 """Return (expr, filematcher) where expr is a revset string built
1321 1321 from log options and file patterns, or None. If --stat or --patch
1322 1322 are not passed, filematcher is None. Otherwise it is a callable
1323 1323 taking a revision number and returning a match object filtering
1324 1324 the files to be detailed when displaying the revision.
1325 1325 """
1326 1326 opt2revset = {
1327 1327 'no_merges': ('not merge()', None),
1328 1328 'only_merges': ('merge()', None),
1329 1329 '_ancestors': ('ancestors(%(val)s)', None),
1330 1330 '_fancestors': ('_firstancestors(%(val)s)', None),
1331 1331 '_descendants': ('descendants(%(val)s)', None),
1332 1332 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1333 1333 '_matchfiles': ('_matchfiles(%(val)s)', None),
1334 1334 'date': ('date(%(val)r)', None),
1335 1335 'branch': ('branch(%(val)r)', ' or '),
1336 1336 '_patslog': ('filelog(%(val)r)', ' or '),
1337 1337 '_patsfollow': ('follow(%(val)r)', ' or '),
1338 1338 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1339 1339 'keyword': ('keyword(%(val)r)', ' or '),
1340 1340 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1341 1341 'user': ('user(%(val)r)', ' or '),
1342 1342 }
1343 1343
1344 1344 opts = dict(opts)
1345 1345 # follow or not follow?
1346 1346 follow = opts.get('follow') or opts.get('follow_first')
1347 1347 followfirst = opts.get('follow_first') and 1 or 0
1348 1348 # --follow with FILE behaviour depends on revs...
1349 1349 startrev = revs[0]
1350 1350 followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
1351 1351
1352 1352 # branch and only_branch are really aliases and must be handled at
1353 1353 # the same time
1354 1354 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1355 1355 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1356 1356 # pats/include/exclude are passed to match.match() directly in
1357 1357 # _matchfiles() revset but walkchangerevs() builds its matcher with
1358 1358 # scmutil.match(). The difference is input pats are globbed on
1359 1359 # platforms without shell expansion (windows).
1360 1360 pctx = repo[None]
1361 1361 match, pats = scmutil.matchandpats(pctx, pats, opts)
1362 1362 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1363 1363 if not slowpath:
1364 1364 for f in match.files():
1365 1365 if follow and f not in pctx:
1366 1366 raise util.Abort(_('cannot follow file not in parent '
1367 1367 'revision: "%s"') % f)
1368 1368 filelog = repo.file(f)
1369 1369 if not filelog:
1370 1370 # A zero count may be a directory or deleted file, so
1371 1371 # try to find matching entries on the slow path.
1372 1372 if follow:
1373 1373 raise util.Abort(
1374 1374 _('cannot follow nonexistent file: "%s"') % f)
1375 1375 slowpath = True
1376 1376
1377 1377 # We decided to fall back to the slowpath because at least one
1378 1378 # of the paths was not a file. Check to see if at least one of them
1379 1379 # existed in history - in that case, we'll continue down the
1380 1380 # slowpath; otherwise, we can turn off the slowpath
1381 1381 if slowpath:
1382 1382 for path in match.files():
1383 1383 if path == '.' or path in repo.store:
1384 1384 break
1385 1385 else:
1386 1386 slowpath = False
1387 1387
1388 1388 if slowpath:
1389 1389 # See walkchangerevs() slow path.
1390 1390 #
1391 1391 if follow:
1392 1392 raise util.Abort(_('can only follow copies/renames for explicit '
1393 1393 'filenames'))
1394 1394 # pats/include/exclude cannot be represented as separate
1395 1395 # revset expressions as their filtering logic applies at file
1396 1396 # level. For instance "-I a -X a" matches a revision touching
1397 1397 # "a" and "b" while "file(a) and not file(b)" does
1398 1398 # not. Besides, filesets are evaluated against the working
1399 1399 # directory.
1400 1400 matchargs = ['r:', 'd:relpath']
1401 1401 for p in pats:
1402 1402 matchargs.append('p:' + p)
1403 1403 for p in opts.get('include', []):
1404 1404 matchargs.append('i:' + p)
1405 1405 for p in opts.get('exclude', []):
1406 1406 matchargs.append('x:' + p)
1407 1407 matchargs = ','.join(('%r' % p) for p in matchargs)
1408 1408 opts['_matchfiles'] = matchargs
1409 1409 else:
1410 1410 if follow:
1411 1411 fpats = ('_patsfollow', '_patsfollowfirst')
1412 1412 fnopats = (('_ancestors', '_fancestors'),
1413 1413 ('_descendants', '_fdescendants'))
1414 1414 if pats:
1415 1415 # follow() revset interprets its file argument as a
1416 1416 # manifest entry, so use match.files(), not pats.
1417 1417 opts[fpats[followfirst]] = list(match.files())
1418 1418 else:
1419 1419 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1420 1420 else:
1421 1421 opts['_patslog'] = list(pats)
1422 1422
1423 1423 filematcher = None
1424 1424 if opts.get('patch') or opts.get('stat'):
1425 1425 if follow:
1426 1426 filematcher = _makegraphfilematcher(repo, pats, followfirst)
1427 1427 else:
1428 1428 filematcher = lambda rev: match
1429 1429
1430 1430 expr = []
1431 1431 for op, val in opts.iteritems():
1432 1432 if not val:
1433 1433 continue
1434 1434 if op not in opt2revset:
1435 1435 continue
1436 1436 revop, andor = opt2revset[op]
1437 1437 if '%(val)' not in revop:
1438 1438 expr.append(revop)
1439 1439 else:
1440 1440 if not isinstance(val, list):
1441 1441 e = revop % {'val': val}
1442 1442 else:
1443 1443 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1444 1444 expr.append(e)
1445 1445
1446 1446 if expr:
1447 1447 expr = '(' + ' and '.join(expr) + ')'
1448 1448 else:
1449 1449 expr = None
1450 1450 return expr, filematcher
1451 1451
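# For intuition, a hypothetical translation by the opt2revset table above:
# "hg log -G -u alice -k bug --no-merges" builds roughly
#
#     expr = "((user('alice')) and (keyword('bug')) and not merge())"
#
# (clause order follows dict iteration and is not guaranteed).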
1452 1452 def getgraphlogrevs(repo, pats, opts):
1453 1453 """Return (revs, expr, filematcher) where revs is an iterable of
1454 1454 revision numbers, expr is a revset string built from log options
1455 1455 and file patterns or None, and used to filter 'revs'. If --stat or
1456 1456 --patch are not passed, filematcher is None. Otherwise it is a
1457 1457 callable taking a revision number and returning a match object
1458 1458 filtering the files to be detailed when displaying the revision.
1459 1459 """
1460 1460 if not len(repo):
1461 1461 return [], None, None
1462 1462 limit = loglimit(opts)
1463 1463 # Default --rev value depends on --follow but --follow behaviour
1464 1464 # depends on revisions resolved from --rev...
1465 1465 follow = opts.get('follow') or opts.get('follow_first')
1466 1466 possiblyunsorted = False # whether revs might need sorting
1467 1467 if opts.get('rev'):
1468 1468 revs = scmutil.revrange(repo, opts['rev'])
1469 1469 # Don't sort here because _makegraphlogrevset might depend on the
1470 1470 # order of revs
1471 1471 possiblyunsorted = True
1472 1472 else:
1473 1473 if follow and len(repo) > 0:
1474 1474 revs = repo.revs('reverse(:.)')
1475 1475 else:
1476 1476 revs = list(repo.changelog)
1477 1477 revs.reverse()
1478 1478 if not revs:
1479 1479 return [], None, None
1480 1480 expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
1481 1481 if possiblyunsorted:
1482 1482 revs.sort(reverse=True)
1483 1483 if expr:
1484 1484 # Revset matchers often operate faster on revisions in changelog
1485 1485 # order, because most filters deal with the changelog.
1486 1486 revs.reverse()
1487 1487 matcher = revset.match(repo.ui, expr)
1488 1488 # Revset matches can reorder revisions. "A or B" typically
1489 1489 # returns the revision matching A then the revision matching B. Sort
1490 1490 # again to fix that.
1491 1491 revs = matcher(repo, revs)
1492 1492 revs.sort(reverse=True)
1493 1493 if limit is not None:
1494 1494 revs = revs[:limit]
1495 1495
1496 1496 return revs, expr, filematcher
1497 1497
1498 1498 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1499 1499 filematcher=None):
1500 1500 seen, state = [], graphmod.asciistate()
1501 1501 for rev, type, ctx, parents in dag:
1502 1502 char = 'o'
1503 1503 if ctx.node() in showparents:
1504 1504 char = '@'
1505 1505 elif ctx.obsolete():
1506 1506 char = 'x'
1507 1507 copies = None
1508 1508 if getrenamed and ctx.rev():
1509 1509 copies = []
1510 1510 for fn in ctx.files():
1511 1511 rename = getrenamed(fn, ctx.rev())
1512 1512 if rename:
1513 1513 copies.append((fn, rename[0]))
1514 1514 revmatchfn = None
1515 1515 if filematcher is not None:
1516 1516 revmatchfn = filematcher(ctx.rev())
1517 1517 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1518 1518 lines = displayer.hunk.pop(rev).split('\n')
1519 1519 if not lines[-1]:
1520 1520 del lines[-1]
1521 1521 displayer.flush(rev)
1522 1522 edges = edgefn(type, char, lines, seen, rev, parents)
1523 1523 for type, char, lines, coldata in edges:
1524 1524 graphmod.ascii(ui, state, type, char, lines, coldata)
1525 1525 displayer.close()
1526 1526
1527 1527 def graphlog(ui, repo, *pats, **opts):
1528 1528 # Parameters are identical to log command ones
1529 1529 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1530 1530 revdag = graphmod.dagwalker(repo, revs)
1531 1531
1532 1532 getrenamed = None
1533 1533 if opts.get('copies'):
1534 1534 endrev = None
1535 1535 if opts.get('rev'):
1536 1536 endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
1537 1537 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1538 1538 displayer = show_changeset(ui, repo, opts, buffered=True)
1539 1539 showparents = [ctx.node() for ctx in repo[None].parents()]
1540 1540 displaygraph(ui, revdag, displayer, showparents,
1541 1541 graphmod.asciiedges, getrenamed, filematcher)
1542 1542
1543 1543 def checkunsupportedgraphflags(pats, opts):
1544 1544 for op in ["newest_first"]:
1545 1545 if op in opts and opts[op]:
1546 1546 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1547 1547 % op.replace("_", "-"))
1548 1548
1549 1549 def graphrevs(repo, nodes, opts):
1550 1550 limit = loglimit(opts)
1551 1551 nodes.reverse()
1552 1552 if limit is not None:
1553 1553 nodes = nodes[:limit]
1554 1554 return graphmod.nodes(repo, nodes)
1555 1555
1556 1556 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1557 1557 join = lambda f: os.path.join(prefix, f)
1558 1558 bad = []
1559 1559 oldbad = match.bad
1560 1560 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1561 1561 names = []
1562 1562 wctx = repo[None]
1563 1563 cca = None
1564 1564 abort, warn = scmutil.checkportabilityalert(ui)
1565 1565 if abort or warn:
1566 1566 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1567 1567 for f in repo.walk(match):
1568 1568 exact = match.exact(f)
1569 1569 if exact or (not explicitonly and f not in repo.dirstate):
1570 1570 if cca:
1571 1571 cca(f)
1572 1572 names.append(f)
1573 1573 if ui.verbose or not exact:
1574 1574 ui.status(_('adding %s\n') % match.rel(join(f)))
1575 1575
1576 1576 for subpath in sorted(wctx.substate):
1577 1577 sub = wctx.sub(subpath)
1578 1578 try:
1579 1579 submatch = matchmod.narrowmatcher(subpath, match)
1580 1580 if listsubrepos:
1581 1581 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1582 1582 False))
1583 1583 else:
1584 1584 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1585 1585 True))
1586 1586 except error.LookupError:
1587 1587 ui.status(_("skipping missing subrepository: %s\n")
1588 1588 % join(subpath))
1589 1589
1590 1590 if not dryrun:
1591 1591 rejected = wctx.add(names, prefix)
1592 1592 bad.extend(f for f in rejected if f in match.files())
1593 1593 return bad
1594 1594
1595 1595 def forget(ui, repo, match, prefix, explicitonly):
1596 1596 join = lambda f: os.path.join(prefix, f)
1597 1597 bad = []
1598 1598 oldbad = match.bad
1599 1599 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1600 1600 wctx = repo[None]
1601 1601 forgot = []
1602 1602 s = repo.status(match=match, clean=True)
1603 1603 forget = sorted(s[0] + s[1] + s[3] + s[6])
1604 1604 if explicitonly:
1605 1605 forget = [f for f in forget if match.exact(f)]
1606 1606
1607 1607 for subpath in sorted(wctx.substate):
1608 1608 sub = wctx.sub(subpath)
1609 1609 try:
1610 1610 submatch = matchmod.narrowmatcher(subpath, match)
1611 1611 subbad, subforgot = sub.forget(ui, submatch, prefix)
1612 1612 bad.extend([subpath + '/' + f for f in subbad])
1613 1613 forgot.extend([subpath + '/' + f for f in subforgot])
1614 1614 except error.LookupError:
1615 1615 ui.status(_("skipping missing subrepository: %s\n")
1616 1616 % join(subpath))
1617 1617
1618 1618 if not explicitonly:
1619 1619 for f in match.files():
1620 1620 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1621 1621 if f not in forgot:
1622 1622 if os.path.exists(match.rel(join(f))):
1623 1623 ui.warn(_('not removing %s: '
1624 1624 'file is already untracked\n')
1625 1625 % match.rel(join(f)))
1626 1626 bad.append(f)
1627 1627
1628 1628 for f in forget:
1629 1629 if ui.verbose or not match.exact(f):
1630 1630 ui.status(_('removing %s\n') % match.rel(join(f)))
1631 1631
1632 1632 rejected = wctx.forget(forget, prefix)
1633 1633 bad.extend(f for f in rejected if f in match.files())
1634 1634 forgot.extend(forget)
1635 1635 return bad, forgot
1636 1636
1637 1637 def duplicatecopies(repo, rev, fromrev):
1638 1638 '''reproduce copies from fromrev to rev in the dirstate'''
1639 1639 for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
1640 1640 # copies.pathcopies returns backward renames, so dst might not
1641 1641 # actually be in the dirstate
1642 1642 if repo.dirstate[dst] in "nma":
1643 1643 repo.dirstate.copy(src, dst)
1644 1644
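# Sketch of the intended effect (hypothetical filenames): if the changes
# between 'fromrev' and 'rev' include "hg copy a b", then
#
#     duplicatecopies(repo, rev, fromrev)
#
# re-marks "b" as copied from "a" in the dirstate, provided "b" is
# tracked there (state in "nma").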
1645 1645 def commit(ui, repo, commitfunc, pats, opts):
1646 1646 '''commit the specified files or all outstanding changes'''
1647 1647 date = opts.get('date')
1648 1648 if date:
1649 1649 opts['date'] = util.parsedate(date)
1650 1650 message = logmessage(ui, opts)
1651 1651
1652 1652 # extract addremove carefully -- this function can be called from a command
1653 1653 # that doesn't support addremove
1654 1654 if opts.get('addremove'):
1655 1655 scmutil.addremove(repo, pats, opts)
1656 1656
1657 1657 return commitfunc(ui, repo, message,
1658 1658 scmutil.match(repo[None], pats, opts), opts)
1659 1659
1660 1660 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1661 1661 ui.note(_('amending changeset %s\n') % old)
1662 1662 base = old.p1()
1663 1663
1664 1664 wlock = lock = newid = None
1665 1665 try:
1666 1666 wlock = repo.wlock()
1667 1667 lock = repo.lock()
1668 1668 tr = repo.transaction('amend')
1669 1669 try:
1670 1670 # See if we got a message from -m or -l, if not, open the editor
1671 1671 # with the message of the changeset to amend
1672 1672 message = logmessage(ui, opts)
1673 1673 # ensure logfile does not conflict with later enforcement of the
1674 1674 # message. potential logfile content has been processed by
1675 1675 # `logmessage` anyway.
1676 1676 opts.pop('logfile')
1677 1677 # First, do a regular commit to record all changes in the working
1678 1678 # directory (if there are any)
1679 1679 ui.callhooks = False
1680 1680 currentbookmark = repo._bookmarkcurrent
1681 1681 try:
1682 1682 repo._bookmarkcurrent = None
1683 1683 opts['message'] = 'temporary amend commit for %s' % old
1684 1684 node = commit(ui, repo, commitfunc, pats, opts)
1685 1685 finally:
1686 1686 repo._bookmarkcurrent = currentbookmark
1687 1687 ui.callhooks = True
1688 1688 ctx = repo[node]
1689 1689
1690 1690 # Participating changesets:
1691 1691 #
1692 1692 # node/ctx o - new (intermediate) commit that contains changes
1693 1693 # | from working dir to go into amending commit
1694 1694 # | (or a workingctx if there were no changes)
1695 1695 # |
1696 1696 # old o - changeset to amend
1697 1697 # |
1698 1698 # base o - parent of amending changeset
1699 1699
1700 1700 # Update extra dict from amended commit (e.g. to preserve graft
1701 1701 # source)
1702 1702 extra.update(old.extra())
1703 1703
1704 1704 # Also update it from the intermediate commit or from the wctx
1705 1705 extra.update(ctx.extra())
1706 1706
1707 1707 if len(old.parents()) > 1:
1708 1708 # ctx.files() isn't reliable for merges, so fall back to the
1709 1709 # slower repo.status() method
1710 1710 files = set([fn for st in repo.status(base, old)[:3]
1711 1711 for fn in st])
1712 1712 else:
1713 1713 files = set(old.files())
1714 1714
1715 1715 # Second, we use either the commit we just did, or if there were no
1716 1716 # changes the parent of the working directory as the version of the
1717 1717 # files in the final amend commit
1718 1718 if node:
1719 1719 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1720 1720
1721 1721 user = ctx.user()
1722 1722 date = ctx.date()
1723 1723 # Recompute copies (avoid recording a -> b -> a)
1724 1724 copied = copies.pathcopies(base, ctx)
1725 1725
1726 1726 # Prune files which were reverted by the updates: if old
1727 1727 # introduced file X and our intermediate commit, node,
1728 1728 # renamed that file, then those two files are the same and
1729 1729 # we can discard X from our list of files. Likewise if X
1730 1730 # was deleted, it's no longer relevant
1731 1731 files.update(ctx.files())
1732 1732
1733 1733 def samefile(f):
1734 1734 if f in ctx.manifest():
1735 1735 a = ctx.filectx(f)
1736 1736 if f in base.manifest():
1737 1737 b = base.filectx(f)
1738 1738 return (not a.cmp(b)
1739 1739 and a.flags() == b.flags())
1740 1740 else:
1741 1741 return False
1742 1742 else:
1743 1743 return f not in base.manifest()
1744 1744 files = [f for f in files if not samefile(f)]
1745 1745
1746 1746 def filectxfn(repo, ctx_, path):
1747 1747 try:
1748 1748 fctx = ctx[path]
1749 1749 flags = fctx.flags()
1750 1750 mctx = context.memfilectx(fctx.path(), fctx.data(),
1751 1751 islink='l' in flags,
1752 1752 isexec='x' in flags,
1753 1753 copied=copied.get(path))
1754 1754 return mctx
1755 1755 except KeyError:
1756 1756 raise IOError
1757 1757 else:
1758 1758 ui.note(_('copying changeset %s to %s\n') % (old, base))
1759 1759
1760 1760 # Use version of files as in the old cset
1761 1761 def filectxfn(repo, ctx_, path):
1762 1762 try:
1763 1763 return old.filectx(path)
1764 1764 except KeyError:
1765 1765 raise IOError
1766 1766
1767 1767 user = opts.get('user') or old.user()
1768 1768 date = opts.get('date') or old.date()
1769 1769 editmsg = False
1770 1770 if not message:
1771 1771 editmsg = True
1772 1772 message = old.description()
1773 1773
1774 1774 pureextra = extra.copy()
1775 1775 extra['amend_source'] = old.hex()
1776 1776
1777 1777 new = context.memctx(repo,
1778 1778 parents=[base.node(), old.p2().node()],
1779 1779 text=message,
1780 1780 files=files,
1781 1781 filectxfn=filectxfn,
1782 1782 user=user,
1783 1783 date=date,
1784 1784 extra=extra)
1785 1785 if editmsg:
1786 1786 new._text = commitforceeditor(repo, new, [])
1787 1787
1788 1788 newdesc = changelog.stripdesc(new.description())
1789 1789 if ((not node)
1790 1790 and newdesc == old.description()
1791 1791 and user == old.user()
1792 1792 and date == old.date()
1793 1793 and pureextra == old.extra()):
1794 1794 # nothing changed. continuing here would create a new node
1795 1795 # anyway because of the amend_source noise.
1796 1796 #
1797 1797 # This is not what we expect from amend.
1798 1798 return old.node()
1799 1799
1800 1800 ph = repo.ui.config('phases', 'new-commit', phases.draft)
1801 1801 try:
1802 1802 repo.ui.setconfig('phases', 'new-commit', old.phase())
1803 1803 newid = repo.commitctx(new)
1804 1804 finally:
1805 1805 repo.ui.setconfig('phases', 'new-commit', ph)
1806 1806 if newid != old.node():
1807 1807 # Reroute the working copy parent to the new changeset
1808 1808 repo.setparents(newid, nullid)
1809 1809
1810 1810 # Move bookmarks from old parent to amend commit
1811 1811 bms = repo.nodebookmarks(old.node())
1812 1812 if bms:
1813 1813 marks = repo._bookmarks
1814 1814 for bm in bms:
1815 1815 marks[bm] = newid
1816 1816 marks.write()
1817 1817 # commit the whole amend process
1818 1818 if obsolete._enabled and newid != old.node():
1819 1819 # mark the new changeset as successor of the rewritten one
1820 1820 new = repo[newid]
1821 1821 obs = [(old, (new,))]
1822 1822 if node:
1823 1823 obs.append((ctx, ()))
1824 1824
1825 1825 obsolete.createmarkers(repo, obs)
1826 1826 tr.close()
1827 1827 finally:
1828 1828 tr.release()
1829 1829 if (not obsolete._enabled) and newid != old.node():
1830 1830 # Strip the intermediate commit (if there was one) and the amended
1831 1831 # commit
1832 1832 if node:
1833 1833 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1834 1834 ui.note(_('stripping amended changeset %s\n') % old)
1835 1835 repair.strip(ui, repo, old.node(), topic='amend-backup')
1836 1836 finally:
1837 1837 if newid is None:
1838 1838 repo.dirstate.invalidate()
1839 1839 lockmod.release(lock, wlock)
1840 1840 return newid
1841 1841
1842 1842 def commiteditor(repo, ctx, subs):
1843 1843 if ctx.description():
1844 1844 return ctx.description()
1845 1845 return commitforceeditor(repo, ctx, subs)
1846 1846
1847 1847 def commitforceeditor(repo, ctx, subs):
1848 1848 edittext = []
1849 1849 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1850 1850 if ctx.description():
1851 1851 edittext.append(ctx.description())
1852 1852 edittext.append("")
1853 1853 edittext.append("") # Empty line between message and comments.
1854 1854 edittext.append(_("HG: Enter commit message."
1855 1855 " Lines beginning with 'HG:' are removed."))
1856 1856 edittext.append(_("HG: Leave message empty to abort commit."))
1857 1857 edittext.append("HG: --")
1858 1858 edittext.append(_("HG: user: %s") % ctx.user())
1859 1859 if ctx.p2():
1860 1860 edittext.append(_("HG: branch merge"))
1861 1861 if ctx.branch():
1862 1862 edittext.append(_("HG: branch '%s'") % ctx.branch())
1863 1863 if bookmarks.iscurrent(repo):
1864 1864 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
1865 1865 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1866 1866 edittext.extend([_("HG: added %s") % f for f in added])
1867 1867 edittext.extend([_("HG: changed %s") % f for f in modified])
1868 1868 edittext.extend([_("HG: removed %s") % f for f in removed])
1869 1869 if not added and not modified and not removed:
1870 1870 edittext.append(_("HG: no files changed"))
1871 1871 edittext.append("")
1872 1872 # run editor in the repository root
1873 1873 olddir = os.getcwd()
1874 1874 os.chdir(repo.root)
1875 1875 text = repo.ui.edit("\n".join(edittext), ctx.user())
1876 1876 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1877 1877 os.chdir(olddir)
1878 1878
1879 1879 if not text.strip():
1880 1880 raise util.Abort(_("empty commit message"))
1881 1881
1882 1882 return text
1883 1883
1884 1884 def commitstatus(repo, node, branch, bheads=None, opts={}):
1885 1885 ctx = repo[node]
1886 1886 parents = ctx.parents()
1887 1887
1888 1888 if (not opts.get('amend') and bheads and node not in bheads and not
1889 1889 [x for x in parents if x.node() in bheads and x.branch() == branch]):
1890 1890 repo.ui.status(_('created new head\n'))
1891 1891 # The message is not printed for initial roots. For the other
1892 1892 # changesets, it is printed in the following situations:
1893 1893 #
1894 1894 # Par column: for the 2 parents with ...
1895 1895 # N: null or no parent
1896 1896 # B: parent is on another named branch
1897 1897 # C: parent is a regular non head changeset
1898 1898 # H: parent was a branch head of the current branch
1899 1899 # Msg column: whether we print "created new head" message
1900 1900 # In the following, it is assumed that there already exist some
1901 1901 # initial branch heads of the current branch, otherwise nothing is
1902 1902 # printed anyway.
1903 1903 #
1904 1904 # Par Msg Comment
1905 1905 # N N y additional topo root
1906 1906 #
1907 1907 # B N y additional branch root
1908 1908 # C N y additional topo head
1909 1909 # H N n usual case
1910 1910 #
1911 1911 # B B y weird additional branch root
1912 1912 # C B y branch merge
1913 1913 # H B n merge with named branch
1914 1914 #
1915 1915 # C C y additional head from merge
1916 1916 # C H n merge with a head
1917 1917 #
1918 1918 # H H n head merge: head count decreases
1919 1919
1920 1920 if not opts.get('close_branch'):
1921 1921 for r in parents:
1922 1922 if r.closesbranch() and r.branch() == branch:
1923 1923 repo.ui.status(_('reopening closed branch head %d\n') % r)
1924 1924
1925 1925 if repo.ui.debugflag:
1926 1926 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
1927 1927 elif repo.ui.verbose:
1928 1928 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1929 1929
1930 1930 def revert(ui, repo, ctx, parents, *pats, **opts):
1931 1931 parent, p2 = parents
1932 1932 node = ctx.node()
1933 1933
1934 1934 mf = ctx.manifest()
1935 1935 if node == parent:
1936 1936 pmf = mf
1937 1937 else:
1938 1938 pmf = None
1939 1939
1940 1940 # need all matching names in dirstate and manifest of target rev,
1941 1941 # so have to walk both. do not print errors if files exist in one
1942 1942 # but not other.
1943 1943
1944 1944 names = {}
1945 1945
1946 1946 wlock = repo.wlock()
1947 1947 try:
1948 1948 # walk dirstate.
1949 1949
1950 1950 m = scmutil.match(repo[None], pats, opts)
1951 1951 m.bad = lambda x, y: False
1952 1952 for abs in repo.walk(m):
1953 1953 names[abs] = m.rel(abs), m.exact(abs)
1954 1954
1955 1955 # walk target manifest.
1956 1956
1957 1957 def badfn(path, msg):
1958 1958 if path in names:
1959 1959 return
1960 1960 if path in ctx.substate:
1961 1961 return
1962 1962 path_ = path + '/'
1963 1963 for f in names:
1964 1964 if f.startswith(path_):
1965 1965 return
1966 1966 ui.warn("%s: %s\n" % (m.rel(path), msg))
1967 1967
1968 1968 m = scmutil.match(ctx, pats, opts)
1969 1969 m.bad = badfn
1970 1970 for abs in ctx.walk(m):
1971 1971 if abs not in names:
1972 1972 names[abs] = m.rel(abs), m.exact(abs)
1973 1973
1974 1974 # get the list of subrepos that must be reverted
1975 1975 targetsubs = sorted(s for s in ctx.substate if m(s))
1976 1976 m = scmutil.matchfiles(repo, names)
1977 1977 changes = repo.status(match=m)[:4]
1978 1978 modified, added, removed, deleted = map(set, changes)
1979 1979
1980 1980 # if f is a rename, also revert the source
1981 1981 cwd = repo.getcwd()
1982 1982 for f in added:
1983 1983 src = repo.dirstate.copied(f)
1984 1984 if src and src not in names and repo.dirstate[src] == 'r':
1985 1985 removed.add(src)
1986 1986 names[src] = (repo.pathto(src, cwd), True)
1987 1987
1988 1988 def removeforget(abs):
1989 1989 if repo.dirstate[abs] == 'a':
1990 1990 return _('forgetting %s\n')
1991 1991 return _('removing %s\n')
1992 1992
1993 1993 revert = ([], _('reverting %s\n'))
1994 1994 add = ([], _('adding %s\n'))
1995 1995 remove = ([], removeforget)
1996 1996 undelete = ([], _('undeleting %s\n'))
1997 1997
1998 1998 disptable = (
1999 1999 # dispatch table:
2000 2000 # file state
2001 2001 # action if in target manifest
2002 2002 # action if not in target manifest
2003 2003 # make backup if in target manifest
2004 2004 # make backup if not in target manifest
2005 2005 (modified, revert, remove, True, True),
2006 2006 (added, revert, remove, True, False),
2007 2007 (removed, undelete, None, True, False),
2008 2008 (deleted, revert, remove, False, False),
2009 2009 )
2010 2010
2011 2011 for abs, (rel, exact) in sorted(names.items()):
2012 2012 mfentry = mf.get(abs)
2013 2013 target = repo.wjoin(abs)
2014 2014 def handle(xlist, dobackup):
2015 2015 xlist[0].append(abs)
2016 2016 if (dobackup and not opts.get('no_backup') and
2017 2017 os.path.lexists(target) and
2018 2018 abs in ctx and repo[None][abs].cmp(ctx[abs])):
2019 2019 bakname = "%s.orig" % rel
2020 2020 ui.note(_('saving current version of %s as %s\n') %
2021 2021 (rel, bakname))
2022 2022 if not opts.get('dry_run'):
2023 2023 util.rename(target, bakname)
2024 2024 if ui.verbose or not exact:
2025 2025 msg = xlist[1]
2026 2026 if not isinstance(msg, basestring):
2027 2027 msg = msg(abs)
2028 2028 ui.status(msg % rel)
2029 2029 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2030 2030 if abs not in table:
2031 2031 continue
2032 2032 # file has changed in dirstate
2033 2033 if mfentry:
2034 2034 handle(hitlist, backuphit)
2035 2035 elif misslist is not None:
2036 2036 handle(misslist, backupmiss)
2037 2037 break
2038 2038 else:
2039 2039 if abs not in repo.dirstate:
2040 2040 if mfentry:
2041 2041 handle(add, True)
2042 2042 elif exact:
2043 2043 ui.warn(_('file not managed: %s\n') % rel)
2044 2044 continue
2045 2045 # file has not changed in dirstate
2046 2046 if node == parent:
2047 2047 if exact:
2048 2048 ui.warn(_('no changes needed to %s\n') % rel)
2049 2049 continue
2050 2050 if pmf is None:
2051 2051 # only need parent manifest in this unlikely case,
2052 2052 # so do not read by default
2053 2053 pmf = repo[parent].manifest()
2054 2054 if abs in pmf and mfentry:
2055 2055 # if version of file is same in parent and target
2056 2056 # manifests, do nothing
2057 2057 if (pmf[abs] != mfentry or
2058 2058 pmf.flags(abs) != mf.flags(abs)):
2059 2059 handle(revert, False)
2060 2060 else:
2061 2061 handle(remove, False)
2062 2062
2063 2063 if not opts.get('dry_run'):
2064 2064 def checkout(f):
2065 2065 fc = ctx[f]
2066 2066 repo.wwrite(f, fc.data(), fc.flags())
2067 2067
2068 audit_path = scmutil.pathauditor(repo.root)
2068 audit_path = pathutil.pathauditor(repo.root)
2069 2069 for f in remove[0]:
2070 2070 if repo.dirstate[f] == 'a':
2071 2071 repo.dirstate.drop(f)
2072 2072 continue
2073 2073 audit_path(f)
2074 2074 try:
2075 2075 util.unlinkpath(repo.wjoin(f))
2076 2076 except OSError:
2077 2077 pass
2078 2078 repo.dirstate.remove(f)
2079 2079
2080 2080 normal = None
2081 2081 if node == parent:
2082 2082 # We're reverting to our parent. If possible, we'd like status
2083 2083 # to report the file as clean. We have to use normallookup for
2084 2084 # merges to avoid losing information about merged/dirty files.
2085 2085 if p2 != nullid:
2086 2086 normal = repo.dirstate.normallookup
2087 2087 else:
2088 2088 normal = repo.dirstate.normal
2089 2089 for f in revert[0]:
2090 2090 checkout(f)
2091 2091 if normal:
2092 2092 normal(f)
2093 2093
2094 2094 for f in add[0]:
2095 2095 checkout(f)
2096 2096 repo.dirstate.add(f)
2097 2097
2098 2098 normal = repo.dirstate.normallookup
2099 2099 if node == parent and p2 == nullid:
2100 2100 normal = repo.dirstate.normal
2101 2101 for f in undelete[0]:
2102 2102 checkout(f)
2103 2103 normal(f)
2104 2104
2105 2105 copied = copies.pathcopies(repo[parent], ctx)
2106 2106
2107 2107 for f in add[0] + undelete[0] + revert[0]:
2108 2108 if f in copied:
2109 2109 repo.dirstate.copy(copied[f], f)
2110 2110
2111 2111 if targetsubs:
2112 2112 # Revert the subrepos on the revert list
2113 2113 for sub in targetsubs:
2114 2114 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
2115 2115 finally:
2116 2116 wlock.release()
2117 2117
2118 2118 def command(table):
2119 2119 '''returns a function object bound to table which can be used as
2120 2120 a decorator for populating table as a command table'''
2121 2121
2122 2122 def cmd(name, options=(), synopsis=None):
2123 2123 def decorator(func):
2124 2124 if synopsis:
2125 2125 table[name] = func, list(options), synopsis
2126 2126 else:
2127 2127 table[name] = func, list(options)
2128 2128 return func
2129 2129 return decorator
2130 2130
2131 2131 return cmd
2132 2132
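# A hedged usage sketch for the command() factory above; the table and the
# 'files' command are illustrative, not part of this change:
#
#     table = {}
#     cmd = command(table)
#
#     @cmd('files', [('v', 'verbose', None, 'be chatty')], 'hg files [FILE]')
#     def files(ui, repo, *pats, **opts):
#         for f in repo[None].walk(scmutil.match(repo[None], pats, opts)):
#             ui.write('%s\n' % f)
#
# table['files'] now maps to (func, options, synopsis), the shape the
# dispatch table expects.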
2133 2133 # a list of (ui, repo) functions called by commands.summary
2134 2134 summaryhooks = util.hooks()
2135 2135
2136 2136 # A list of state files kept by multistep operations like graft.
2137 2137 # Since graft cannot be aborted, it is considered 'clearable' by update.
2138 2138 # note: bisect is intentionally excluded
2139 2139 # (state file, clearable, allowcommit, error, hint)
2140 2140 unfinishedstates = [
2141 2141 ('graftstate', True, False, _('graft in progress'),
2142 2142 _("use 'hg graft --continue' or 'hg update' to abort")),
2143 2143 ('updatestate', True, False, _('last update was interrupted'),
2144 2144 _("use 'hg update' to get a consistent checkout"))
2145 2145 ]
2146 2146
2147 2147 def checkunfinished(repo, commit=False):
2148 2148 '''Look for an unfinished multistep operation, like graft, and abort
2149 2149 if found. It's probably good to check this right before
2150 2150 bailifchanged().
2151 2151 '''
2152 2152 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2153 2153 if commit and allowcommit:
2154 2154 continue
2155 2155 if repo.vfs.exists(f):
2156 2156 raise util.Abort(msg, hint=hint)
2157 2157
2158 2158 def clearunfinished(repo):
2159 2159 '''Check for unfinished operations (as above), and clear the ones
2160 2160 that are clearable.
2161 2161 '''
2162 2162 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2163 2163 if not clearable and repo.vfs.exists(f):
2164 2164 raise util.Abort(msg, hint=hint)
2165 2165 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2166 2166 if clearable and repo.vfs.exists(f):
2167 2167 util.unlink(repo.join(f))
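# Sketch of how an extension could register its own state file here
# (hypothetical entry, shown for illustration only):
#
#     unfinishedstates.append(
#         ('rebasestate', False, False, _('rebase in progress'),
#          _("use 'hg rebase --continue' or 'hg rebase --abort'")))
#
# With clearable=False, 'hg update' refuses to run until the operation
# is finished or aborted.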
@@ -1,852 +1,852 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
12 12 import os, stat, errno, gc
13 13
14 14 propertycache = util.propertycache
15 15 filecache = scmutil.filecache
16 16 _rangemask = 0x7fffffff
17 17
18 18 class repocache(filecache):
19 19 """filecache for files in .hg/"""
20 20 def join(self, obj, fname):
21 21 return obj._opener.join(fname)
22 22
23 23 class rootcache(filecache):
24 24 """filecache for files in the repository root"""
25 25 def join(self, obj, fname):
26 26 return obj._join(fname)
27 27
28 28 class dirstate(object):
29 29
30 30 def __init__(self, opener, ui, root, validate):
31 31 '''Create a new dirstate object.
32 32
33 33 opener is an open()-like callable that can be used to open the
34 34 dirstate file; root is the root of the directory tracked by
35 35 the dirstate.
36 36 '''
37 37 self._opener = opener
38 38 self._validate = validate
39 39 self._root = root
40 40 self._rootdir = os.path.join(root, '')
41 41 self._dirty = False
42 42 self._dirtypl = False
43 43 self._lastnormaltime = 0
44 44 self._ui = ui
45 45 self._filecache = {}
46 46
47 47 @propertycache
48 48 def _map(self):
49 49 '''Return the dirstate contents as a map from filename to
50 50 (state, mode, size, time).'''
51 51 self._read()
52 52 return self._map
53 53
54 54 @propertycache
55 55 def _copymap(self):
56 56 self._read()
57 57 return self._copymap
58 58
59 59 @propertycache
60 60 def _foldmap(self):
61 61 f = {}
62 62 for name, s in self._map.iteritems():
63 63 if s[0] != 'r':
64 64 f[util.normcase(name)] = name
65 65 for name in self._dirs:
66 66 f[util.normcase(name)] = name
67 67 f['.'] = '.' # prevents useless util.fspath() invocation
68 68 return f
69 69
70 70 @repocache('branch')
71 71 def _branch(self):
72 72 try:
73 73 return self._opener.read("branch").strip() or "default"
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 return "default"
78 78
79 79 @propertycache
80 80 def _pl(self):
81 81 try:
82 82 fp = self._opener("dirstate")
83 83 st = fp.read(40)
84 84 fp.close()
85 85 l = len(st)
86 86 if l == 40:
87 87 return st[:20], st[20:40]
88 88 elif l > 0 and l < 40:
89 89 raise util.Abort(_('working directory state appears damaged!'))
90 90 except IOError, err:
91 91 if err.errno != errno.ENOENT:
92 92 raise
93 93 return [nullid, nullid]
94 94
95 95 @propertycache
96 96 def _dirs(self):
97 97 return scmutil.dirs(self._map, 'r')
98 98
99 99 def dirs(self):
100 100 return self._dirs
101 101
102 102 @rootcache('.hgignore')
103 103 def _ignore(self):
104 104 files = [self._join('.hgignore')]
105 105 for name, path in self._ui.configitems("ui"):
106 106 if name == 'ignore' or name.startswith('ignore.'):
107 107 files.append(util.expandpath(path))
108 108 return ignore.ignore(self._root, files, self._ui.warn)
109 109
110 110 @propertycache
111 111 def _slash(self):
112 112 return self._ui.configbool('ui', 'slash') and os.sep != '/'
113 113
114 114 @propertycache
115 115 def _checklink(self):
116 116 return util.checklink(self._root)
117 117
118 118 @propertycache
119 119 def _checkexec(self):
120 120 return util.checkexec(self._root)
121 121
122 122 @propertycache
123 123 def _checkcase(self):
124 124 return not util.checkcase(self._join('.hg'))
125 125
126 126 def _join(self, f):
127 127 # much faster than os.path.join()
128 128 # it's safe because f is always a relative path
129 129 return self._rootdir + f
130 130
131 131 def flagfunc(self, buildfallback):
132 132 if self._checklink and self._checkexec:
133 133 def f(x):
134 134 try:
135 135 st = os.lstat(self._join(x))
136 136 if util.statislink(st):
137 137 return 'l'
138 138 if util.statisexec(st):
139 139 return 'x'
140 140 except OSError:
141 141 pass
142 142 return ''
143 143 return f
144 144
145 145 fallback = buildfallback()
146 146 if self._checklink:
147 147 def f(x):
148 148 if os.path.islink(self._join(x)):
149 149 return 'l'
150 150 if 'x' in fallback(x):
151 151 return 'x'
152 152 return ''
153 153 return f
154 154 if self._checkexec:
155 155 def f(x):
156 156 if 'l' in fallback(x):
157 157 return 'l'
158 158 if util.isexec(self._join(x)):
159 159 return 'x'
160 160 return ''
161 161 return f
162 162 else:
163 163 return fallback
164 164
165 165 def getcwd(self):
166 166 cwd = os.getcwd()
167 167 if cwd == self._root:
168 168 return ''
169 169 # self._root ends with a path separator if self._root is '/' or 'C:\'
170 170 rootsep = self._root
171 171 if not util.endswithsep(rootsep):
172 172 rootsep += os.sep
173 173 if cwd.startswith(rootsep):
174 174 return cwd[len(rootsep):]
175 175 else:
176 176 # we're outside the repo. return an absolute path.
177 177 return cwd
178 178
179 179 def pathto(self, f, cwd=None):
180 180 if cwd is None:
181 181 cwd = self.getcwd()
182 182 path = util.pathto(self._root, cwd, f)
183 183 if self._slash:
184 184 return util.pconvert(path)
185 185 return path
186 186
187 187 def __getitem__(self, key):
188 188 '''Return the current state of key (a filename) in the dirstate.
189 189
190 190 States are:
191 191 n normal
192 192 m needs merging
193 193 r marked for removal
194 194 a marked for addition
195 195 ? not tracked
196 196 '''
197 197 return self._map.get(key, ("?",))[0]
198 198
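# Illustrative lookups against a hypothetical dirstate: the mapping
# interface above never raises KeyError for unknown paths.
#
#     repo.dirstate['tracked.txt']   # -> 'n' when clean
#     repo.dirstate['not-tracked']   # -> '?'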
199 199 def __contains__(self, key):
200 200 return key in self._map
201 201
202 202 def __iter__(self):
203 203 for x in sorted(self._map):
204 204 yield x
205 205
206 206 def iteritems(self):
207 207 return self._map.iteritems()
208 208
209 209 def parents(self):
210 210 return [self._validate(p) for p in self._pl]
211 211
212 212 def p1(self):
213 213 return self._validate(self._pl[0])
214 214
215 215 def p2(self):
216 216 return self._validate(self._pl[1])
217 217
218 218 def branch(self):
219 219 return encoding.tolocal(self._branch)
220 220
221 221 def setparents(self, p1, p2=nullid):
222 222 """Set dirstate parents to p1 and p2.
223 223
224 224 When moving from two parents to one, 'm' merged entries are
225 225 adjusted to normal and previous copy records are discarded and
226 226 returned by the call.
227 227
228 228 See localrepo.setparents()
229 229 """
230 230 self._dirty = self._dirtypl = True
231 231 oldp2 = self._pl[1]
232 232 self._pl = p1, p2
233 233 copies = {}
234 234 if oldp2 != nullid and p2 == nullid:
235 235 # Discard 'm' markers when moving away from a merge state
236 236 for f, s in self._map.iteritems():
237 237 if s[0] == 'm':
238 238 if f in self._copymap:
239 239 copies[f] = self._copymap[f]
240 240 self.normallookup(f)
241 241 return copies
242 242
243 243 def setbranch(self, branch):
244 244 self._branch = encoding.fromlocal(branch)
245 245 f = self._opener('branch', 'w', atomictemp=True)
246 246 try:
247 247 f.write(self._branch + '\n')
248 248 f.close()
249 249
250 250 # make sure filecache has the correct stat info for _branch after
251 251 # replacing the underlying file
252 252 ce = self._filecache['_branch']
253 253 if ce:
254 254 ce.refresh()
255 255 except: # re-raises
256 256 f.discard()
257 257 raise
258 258
259 259 def _read(self):
260 260 self._map = {}
261 261 self._copymap = {}
262 262 try:
263 263 st = self._opener.read("dirstate")
264 264 except IOError, err:
265 265 if err.errno != errno.ENOENT:
266 266 raise
267 267 return
268 268 if not st:
269 269 return
270 270
271 271 # Python's garbage collector triggers a GC each time a certain number
272 272 # of container objects (the number being defined by
273 273 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
274 274 # for each file in the dirstate. The C version then immediately marks
275 275 # them as not to be tracked by the collector. However, this has no
276 276 # effect on when GCs are triggered, only on what objects the GC looks
277 277 # into. This means that O(number of files) GCs are unavoidable.
278 278 # Depending on when in the process's lifetime the dirstate is parsed,
279 279 # this can get very expensive. As a workaround, disable GC while
280 280 # parsing the dirstate.
281 281 gcenabled = gc.isenabled()
282 282 gc.disable()
283 283 try:
284 284 p = parsers.parse_dirstate(self._map, self._copymap, st)
285 285 finally:
286 286 if gcenabled:
287 287 gc.enable()
288 288 if not self._dirtypl:
289 289 self._pl = p
290 290
291 291 def invalidate(self):
292 292 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
293 293 "_ignore"):
294 294 if a in self.__dict__:
295 295 delattr(self, a)
296 296 self._lastnormaltime = 0
297 297 self._dirty = False
298 298
299 299 def copy(self, source, dest):
300 300 """Mark dest as a copy of source. Unmark dest if source is None."""
301 301 if source == dest:
302 302 return
303 303 self._dirty = True
304 304 if source is not None:
305 305 self._copymap[dest] = source
306 306 elif dest in self._copymap:
307 307 del self._copymap[dest]
308 308
309 309 def copied(self, file):
310 310 return self._copymap.get(file, None)
311 311
312 312 def copies(self):
313 313 return self._copymap
314 314
315 315 def _droppath(self, f):
316 316 if self[f] not in "?r" and "_dirs" in self.__dict__:
317 317 self._dirs.delpath(f)
318 318
319 319 def _addpath(self, f, state, mode, size, mtime):
320 320 oldstate = self[f]
321 321 if state == 'a' or oldstate == 'r':
322 322 scmutil.checkfilename(f)
323 323 if f in self._dirs:
324 324 raise util.Abort(_('directory %r already in dirstate') % f)
325 325 # shadows
326 326 for d in scmutil.finddirs(f):
327 327 if d in self._dirs:
328 328 break
329 329 if d in self._map and self[d] != 'r':
330 330 raise util.Abort(
331 331 _('file %r in dirstate clashes with %r') % (d, f))
332 332 if oldstate in "?r" and "_dirs" in self.__dict__:
333 333 self._dirs.addpath(f)
334 334 self._dirty = True
335 335 self._map[f] = (state, mode, size, mtime)
336 336
337 337 def normal(self, f):
338 338 '''Mark a file normal and clean.'''
339 339 s = os.lstat(self._join(f))
340 340 mtime = int(s.st_mtime)
341 341 self._addpath(f, 'n', s.st_mode,
342 342 s.st_size & _rangemask, mtime & _rangemask)
343 343 if f in self._copymap:
344 344 del self._copymap[f]
345 345 if mtime > self._lastnormaltime:
346 346 # Remember the most recent modification timeslot for status(),
347 347 # to make sure we won't miss future size-preserving file content
348 348 # modifications that happen within the same timeslot.
349 349 self._lastnormaltime = mtime
350 350
351 351 def normallookup(self, f):
352 352 '''Mark a file normal, but possibly dirty.'''
353 353 if self._pl[1] != nullid and f in self._map:
354 354 # if there is a merge going on and the file was either
355 355 # in state 'm' (-1) or coming from other parent (-2) before
356 356 # being removed, restore that state.
357 357 entry = self._map[f]
358 358 if entry[0] == 'r' and entry[2] in (-1, -2):
359 359 source = self._copymap.get(f)
360 360 if entry[2] == -1:
361 361 self.merge(f)
362 362 elif entry[2] == -2:
363 363 self.otherparent(f)
364 364 if source:
365 365 self.copy(source, f)
366 366 return
367 367 if entry[0] == 'm' or (entry[0] == 'n' and entry[2] == -2):
368 368 return
369 369 self._addpath(f, 'n', 0, -1, -1)
370 370 if f in self._copymap:
371 371 del self._copymap[f]
372 372
373 373 def otherparent(self, f):
374 374 '''Mark as coming from the other parent, always dirty.'''
375 375 if self._pl[1] == nullid:
376 376 raise util.Abort(_("setting %r to other parent "
377 377 "only allowed in merges") % f)
378 378 self._addpath(f, 'n', 0, -2, -1)
379 379 if f in self._copymap:
380 380 del self._copymap[f]
381 381
382 382 def add(self, f):
383 383 '''Mark a file added.'''
384 384 self._addpath(f, 'a', 0, -1, -1)
385 385 if f in self._copymap:
386 386 del self._copymap[f]
387 387
388 388 def remove(self, f):
389 389 '''Mark a file removed.'''
390 390 self._dirty = True
391 391 self._droppath(f)
392 392 size = 0
393 393 if self._pl[1] != nullid and f in self._map:
394 394 # backup the previous state
395 395 entry = self._map[f]
396 396 if entry[0] == 'm': # merge
397 397 size = -1
398 398 elif entry[0] == 'n' and entry[2] == -2: # other parent
399 399 size = -2
400 400 self._map[f] = ('r', 0, size, 0)
401 401 if size == 0 and f in self._copymap:
402 402 del self._copymap[f]
403 403
404 404 def merge(self, f):
405 405 '''Mark a file merged.'''
406 406 if self._pl[1] == nullid:
407 407 return self.normallookup(f)
408 408 s = os.lstat(self._join(f))
409 409 self._addpath(f, 'm', s.st_mode,
410 410 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
411 411 if f in self._copymap:
412 412 del self._copymap[f]
413 413
414 414 def drop(self, f):
415 415 '''Drop a file from the dirstate'''
416 416 if f in self._map:
417 417 self._dirty = True
418 418 self._droppath(f)
419 419 del self._map[f]
420 420
421 421 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
422 422 normed = util.normcase(path)
423 423 folded = self._foldmap.get(normed, None)
424 424 if folded is None:
425 425 if isknown:
426 426 folded = path
427 427 else:
428 428 if exists is None:
429 429 exists = os.path.lexists(os.path.join(self._root, path))
430 430 if not exists:
431 431 # Maybe a path component exists
432 432 if not ignoremissing and '/' in path:
433 433 d, f = path.rsplit('/', 1)
434 434 d = self._normalize(d, isknown, ignoremissing, None)
435 435 folded = d + "/" + f
436 436 else:
437 437 # No path components, preserve original case
438 438 folded = path
439 439 else:
440 440 # recursively normalize leading directory components
441 441 # against dirstate
442 442 if '/' in normed:
443 443 d, f = normed.rsplit('/', 1)
444 444 d = self._normalize(d, isknown, ignoremissing, True)
445 445 r = self._root + "/" + d
446 446 folded = d + "/" + util.fspath(f, r)
447 447 else:
448 448 folded = util.fspath(normed, self._root)
449 449 self._foldmap[normed] = folded
450 450
451 451 return folded
452 452
453 453 def normalize(self, path, isknown=False, ignoremissing=False):
454 454 '''
455 455 normalize the case of a pathname when on a casefolding filesystem
456 456
457 457 isknown specifies whether the filename came from walking the
458 458 disk, to avoid extra filesystem access.
459 459
460 460 If ignoremissing is True, missing paths are returned
461 461 unchanged. Otherwise, we try harder to normalize possibly
462 462 existing path components.
463 463
464 464 The normalized case is determined based on the following precedence:
465 465
466 466 - version of name already stored in the dirstate
467 467 - version of name stored on disk
468 468 - version provided via command arguments
469 469 '''
470 470
471 471 if self._checkcase:
472 472 return self._normalize(path, isknown, ignoremissing)
473 473 return path
474 474
475 475 def clear(self):
476 476 self._map = {}
477 477 if "_dirs" in self.__dict__:
478 478 delattr(self, "_dirs")
479 479 self._copymap = {}
480 480 self._pl = [nullid, nullid]
481 481 self._lastnormaltime = 0
482 482 self._dirty = True
483 483
484 484 def rebuild(self, parent, allfiles, changedfiles=None):
485 485 changedfiles = changedfiles or allfiles
486 486 oldmap = self._map
487 487 self.clear()
488 488 for f in allfiles:
489 489 if f not in changedfiles:
490 490 self._map[f] = oldmap[f]
491 491 else:
492 492 if 'x' in allfiles.flags(f):
493 493 self._map[f] = ('n', 0777, -1, 0)
494 494 else:
495 495 self._map[f] = ('n', 0666, -1, 0)
496 496 self._pl = (parent, nullid)
497 497 self._dirty = True
498 498
499 499 def write(self):
500 500 if not self._dirty:
501 501 return
502 502 st = self._opener("dirstate", "w", atomictemp=True)
503 503
504 504 def finish(s):
505 505 st.write(s)
506 506 st.close()
507 507 self._lastnormaltime = 0
508 508 self._dirty = self._dirtypl = False
509 509
510 510 # use the modification time of the newly created temporary file as the
511 511 # filesystem's notion of 'now'
512 512 now = util.fstat(st).st_mtime
513 513 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
514 514
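Why write() hands 'now' to pack_dirstate deserves a note. The sketch below is an assumption based on the comment above, not the parser's actual code: an entry whose mtime equals the write time could still be modified within the same timeslot, so it is safer to store it as needing a later lookup.

    def pack_entry(entry, now):
        # Hypothetical per-entry treatment: clamp a racy mtime to -1 so the
        # next status() rereads the file instead of trusting the timestamp.
        state, mode, size, mtime = entry
        if state == 'n' and mtime == now:
            mtime = -1
        return (state, mode, size, mtime)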
515 515 def _dirignore(self, f):
516 516 if f == '.':
517 517 return False
518 518 if self._ignore(f):
519 519 return True
520 520 for p in scmutil.finddirs(f):
521 521 if self._ignore(p):
522 522 return True
523 523 return False
524 524
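_dirignore() asks scmutil.finddirs for every ancestor directory of f. A stand-in with the same observable behaviour (an illustrative reimplementation, not the scmutil code):

    def finddirs(path):
        # Yield the ancestor directories of a '/'-separated path, longest
        # first, exactly as the loop in _dirignore() consumes them.
        pos = path.rfind('/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind('/', 0, pos)

    # list(finddirs('a/b/c.txt')) -> ['a/b', 'a']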
525 525 def _walkexplicit(self, match, subrepos):
526 526 '''Get stat data about the files explicitly specified by match.
527 527
528 528 Return a triple (results, dirsfound, dirsnotfound).
529 529 - results is a mapping from filename to stat result. It also contains
530 530 listings mapping subrepos and .hg to None.
531 531 - dirsfound is a list of files found to be directories.
532 532 - dirsnotfound is a list of files that the dirstate thinks are
533 533 directories and that were not found.'''
534 534
535 535 def badtype(mode):
536 536 kind = _('unknown')
537 537 if stat.S_ISCHR(mode):
538 538 kind = _('character device')
539 539 elif stat.S_ISBLK(mode):
540 540 kind = _('block device')
541 541 elif stat.S_ISFIFO(mode):
542 542 kind = _('fifo')
543 543 elif stat.S_ISSOCK(mode):
544 544 kind = _('socket')
545 545 elif stat.S_ISDIR(mode):
546 546 kind = _('directory')
547 547 return _('unsupported file type (type is %s)') % kind
548 548
549 549 matchedir = match.explicitdir
550 550 badfn = match.bad
551 551 dmap = self._map
552 552 normpath = util.normpath
553 553 lstat = os.lstat
554 554 getkind = stat.S_IFMT
555 555 dirkind = stat.S_IFDIR
556 556 regkind = stat.S_IFREG
557 557 lnkkind = stat.S_IFLNK
558 558 join = self._join
559 559 dirsfound = []
560 560 foundadd = dirsfound.append
561 561 dirsnotfound = []
562 562 notfoundadd = dirsnotfound.append
563 563
564 564 if match.matchfn != match.exact and self._checkcase:
565 565 normalize = self._normalize
566 566 else:
567 567 normalize = None
568 568
569 569 files = sorted(match.files())
570 570 subrepos.sort()
571 571 i, j = 0, 0
572 572 while i < len(files) and j < len(subrepos):
573 573 subpath = subrepos[j] + "/"
574 574 if files[i] < subpath:
575 575 i += 1
576 576 continue
577 577 while i < len(files) and files[i].startswith(subpath):
578 578 del files[i]
579 579 j += 1
580 580
581 581 if not files or '.' in files:
582 582 files = ['']
583 583 results = dict.fromkeys(subrepos)
584 584 results['.hg'] = None
585 585
586 586 for ff in files:
587 587 if normalize:
588 588 nf = normalize(normpath(ff), False, True)
589 589 else:
590 590 nf = normpath(ff)
591 591 if nf in results:
592 592 continue
593 593
594 594 try:
595 595 st = lstat(join(nf))
596 596 kind = getkind(st.st_mode)
597 597 if kind == dirkind:
598 598 if nf in dmap:
599 599 # the file was replaced by a directory but is still in the dirstate
600 600 results[nf] = None
601 601 if matchedir:
602 602 matchedir(nf)
603 603 foundadd(nf)
604 604 elif kind == regkind or kind == lnkkind:
605 605 results[nf] = st
606 606 else:
607 607 badfn(ff, badtype(kind))
608 608 if nf in dmap:
609 609 results[nf] = None
610 610 except OSError, inst:
611 611 if nf in dmap: # does it exactly match a file?
612 612 results[nf] = None
613 613 else: # does it match a directory?
614 614 prefix = nf + "/"
615 615 for fn in dmap:
616 616 if fn.startswith(prefix):
617 617 if matchedir:
618 618 matchedir(nf)
619 619 notfoundadd(nf)
620 620 break
621 621 else:
622 622 badfn(ff, inst.strerror)
623 623
624 624 return results, dirsfound, dirsnotfound
625 625
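The subrepo pruning near the top of _walkexplicit depends on both lists being sorted, so a single forward sweep removes every explicit file that lives inside a subrepo. The same loop, extracted into a standalone function for illustration:

    def dropsubrepopaths(files, subrepos):
        # Both lists sorted; advance two indices in lockstep and delete
        # files that fall under subrepos[j] + '/'.
        files = sorted(files)
        subrepos = sorted(subrepos)
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1
        return files

    # dropsubrepopaths(['a.txt', 'sub/x', 'sub/y', 'z.txt'], ['sub'])
    # -> ['a.txt', 'z.txt']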
626 626 def walk(self, match, subrepos, unknown, ignored, full=True):
627 627 '''
628 628 Walk recursively through the directory tree, finding all files
629 629 matched by match.
630 630
631 631 If full is False, maybe skip some known-clean files.
632 632
633 633 Return a dict mapping filename to stat-like object (either
634 634 mercurial.osutil.stat instance or return value of os.stat()).
635 635
636 636 '''
637 637 # full is a flag that extensions that hook into walk can use -- this
638 638 # implementation doesn't use it at all. This satisfies the contract
639 639 # because we only guarantee a "maybe".
640 640
641 641 def fwarn(f, msg):
642 642 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
643 643 return False
644 644
645 645 ignore = self._ignore
646 646 dirignore = self._dirignore
647 647 if ignored:
648 648 ignore = util.never
649 649 dirignore = util.never
650 650 elif not unknown:
651 651 # if unknown and ignored are False, skip step 2
652 652 ignore = util.always
653 653 dirignore = util.always
654 654
655 655 matchfn = match.matchfn
656 656 matchalways = match.always()
657 657 matchtdir = match.traversedir
658 658 dmap = self._map
659 659 listdir = osutil.listdir
660 660 lstat = os.lstat
661 661 dirkind = stat.S_IFDIR
662 662 regkind = stat.S_IFREG
663 663 lnkkind = stat.S_IFLNK
664 664 join = self._join
665 665
666 666 exact = skipstep3 = False
667 667 if matchfn == match.exact: # match.exact
668 668 exact = True
669 669 dirignore = util.always # skip step 2
670 670 elif match.files() and not match.anypats(): # match.match, no patterns
671 671 skipstep3 = True
672 672
673 673 if not exact and self._checkcase:
674 674 normalize = self._normalize
675 675 skipstep3 = False
676 676 else:
677 677 normalize = None
678 678
679 679 # step 1: find all explicit files
680 680 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
681 681
682 682 skipstep3 = skipstep3 and not (work or dirsnotfound)
683 683 work = [d for d in work if not dirignore(d)]
684 684 wadd = work.append
685 685
686 686 # step 2: visit subdirectories
687 687 while work:
688 688 nd = work.pop()
689 689 skip = None
690 690 if nd == '.':
691 691 nd = ''
692 692 else:
693 693 skip = '.hg'
694 694 try:
695 695 entries = listdir(join(nd), stat=True, skip=skip)
696 696 except OSError, inst:
697 697 if inst.errno in (errno.EACCES, errno.ENOENT):
698 698 fwarn(nd, inst.strerror)
699 699 continue
700 700 raise
701 701 for f, kind, st in entries:
702 702 if normalize:
703 703 nf = normalize(nd and (nd + "/" + f) or f, True, True)
704 704 else:
705 705 nf = nd and (nd + "/" + f) or f
706 706 if nf not in results:
707 707 if kind == dirkind:
708 708 if not ignore(nf):
709 709 if matchtdir:
710 710 matchtdir(nf)
711 711 wadd(nf)
712 712 if nf in dmap and (matchalways or matchfn(nf)):
713 713 results[nf] = None
714 714 elif kind == regkind or kind == lnkkind:
715 715 if nf in dmap:
716 716 if matchalways or matchfn(nf):
717 717 results[nf] = st
718 718 elif (matchalways or matchfn(nf)) and not ignore(nf):
719 719 results[nf] = st
720 720 elif nf in dmap and (matchalways or matchfn(nf)):
721 721 results[nf] = None
722 722
723 723 for s in subrepos:
724 724 del results[s]
725 725 del results['.hg']
726 726
727 727 # step 3: report unseen items in the dmap hash
728 728 if not skipstep3 and not exact:
729 729 if not results and matchalways:
730 730 visit = dmap.keys()
731 731 else:
732 732 visit = [f for f in dmap if f not in results and matchfn(f)]
733 733 visit.sort()
734 734
735 735 if unknown:
736 736 # unknown == True means we walked the full directory tree above.
737 737 # So if a file is not seen it was either a) not matching matchfn,
738 738 # b) ignored, c) missing, or d) under a symlink directory.
739 audit_path = scmutil.pathauditor(self._root)
739 audit_path = pathutil.pathauditor(self._root)
740 740
741 741 for nf in iter(visit):
742 742 # Report ignored items in the dmap as long as they are not
743 743 # under a symlink directory.
744 744 if audit_path.check(nf):
745 745 try:
746 746 results[nf] = lstat(join(nf))
747 747 except OSError:
748 748 # file doesn't exist
749 749 results[nf] = None
750 750 else:
751 751 # It's either missing or under a symlink directory
752 752 results[nf] = None
753 753 else:
754 754 # We may not have walked the full directory tree above,
755 755 # so stat everything we missed.
756 756 nf = iter(visit).next
757 757 for st in util.statfiles([join(i) for i in visit]):
758 758 results[nf()] = st
759 759 return results
760 760
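The change above (scmutil.pathauditor to pathutil.pathauditor) keeps the calling convention intact: check() returns a boolean instead of raising, which is exactly what step 3 relies on. A hedged usage sketch, assuming a Mercurial tree of this vintage on the import path and a hypothetical repository root:

    from mercurial import pathutil

    auditor = pathutil.pathauditor('/path/to/repo')  # hypothetical root
    if auditor.check('some/tracked/file'):           # False instead of raising
        print('path is safe to stat')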
761 761 def status(self, match, subrepos, ignored, clean, unknown):
762 762 '''Determine the status of the working copy relative to the
763 763 dirstate and return a tuple of lists (unsure, modified, added,
764 764 removed, deleted, unknown, ignored, clean), where:
765 765
766 766 unsure:
767 767 files that might have been modified since the dirstate was
768 768 written, but need to be read to be sure (size is the same
769 769 but mtime differs)
770 770 modified:
771 771 files that have definitely been modified since the dirstate
772 772 was written (different size or mode)
773 773 added:
774 774 files that have been explicitly added with hg add
775 775 removed:
776 776 files that have been explicitly removed with hg remove
777 777 deleted:
778 778 files that have been deleted through other means ("missing")
779 779 unknown:
780 780 files not in the dirstate that are not ignored
781 781 ignored:
782 782 files not in the dirstate that are ignored
783 783 (by _dirignore())
784 784 clean:
785 785 files that have definitely not been modified since the
786 786 dirstate was written
787 787 '''
788 788 listignored, listclean, listunknown = ignored, clean, unknown
789 789 lookup, modified, added, unknown, ignored = [], [], [], [], []
790 790 removed, deleted, clean = [], [], []
791 791
792 792 dmap = self._map
793 793 ladd = lookup.append # aka "unsure"
794 794 madd = modified.append
795 795 aadd = added.append
796 796 uadd = unknown.append
797 797 iadd = ignored.append
798 798 radd = removed.append
799 799 dadd = deleted.append
800 800 cadd = clean.append
801 801 mexact = match.exact
802 802 dirignore = self._dirignore
803 803 checkexec = self._checkexec
804 804 copymap = self._copymap
805 805 lastnormaltime = self._lastnormaltime
806 806
807 807 # We need to do full walks when either
808 808 # - we're listing all clean files, or
809 809 # - match.traversedir does something, because match.traversedir should
810 810 # be called for every dir in the working dir
811 811 full = listclean or match.traversedir is not None
812 812 for fn, st in self.walk(match, subrepos, listunknown, listignored,
813 813 full=full).iteritems():
814 814 if fn not in dmap:
815 815 if (listignored or mexact(fn)) and dirignore(fn):
816 816 if listignored:
817 817 iadd(fn)
818 818 else:
819 819 uadd(fn)
820 820 continue
821 821
822 822 state, mode, size, time = dmap[fn]
823 823
824 824 if not st and state in "nma":
825 825 dadd(fn)
826 826 elif state == 'n':
827 827 mtime = int(st.st_mtime)
828 828 if (size >= 0 and
829 829 ((size != st.st_size and size != st.st_size & _rangemask)
830 830 or ((mode ^ st.st_mode) & 0100 and checkexec))
831 831 or size == -2 # other parent
832 832 or fn in copymap):
833 833 madd(fn)
834 834 elif time != mtime and time != mtime & _rangemask:
835 835 ladd(fn)
836 836 elif mtime == lastnormaltime:
837 837 # fn may have been changed in the same timeslot without
838 838 # changing its size. This can happen if we quickly do
839 839 # multiple commits in a single transaction.
840 840 # Force lookup, so we don't miss such a racy file change.
841 841 ladd(fn)
842 842 elif listclean:
843 843 cadd(fn)
844 844 elif state == 'm':
845 845 madd(fn)
846 846 elif state == 'a':
847 847 aadd(fn)
848 848 elif state == 'r':
849 849 radd(fn)
850 850
851 851 return (lookup, modified, added, removed, deleted, unknown, ignored,
852 852 clean)
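The state-'n' branch above packs several comparisons into one expression. Restated as a standalone helper, for illustration only; it omits the copy-map and lastnormaltime cases, and assumes _rangemask is the module's 0x7fffffff:

    def classifynormal(entry, st, checkexec=True, _rangemask=0x7fffffff):
        # Return 'modified', 'lookup' or 'clean' for a state-'n' entry,
        # mirroring the comparison chain in status().
        state, mode, size, time = entry
        if size >= 0 and (size not in (st.st_size, st.st_size & _rangemask)
                          or ((mode ^ st.st_mode) & 0o100 and checkexec)):
            return 'modified'              # size or exec bit changed
        if size == -2:
            return 'modified'              # came from the other parent
        if time not in (int(st.st_mtime), int(st.st_mtime) & _rangemask):
            return 'lookup'                # same size, mtime differs
        return 'clean'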
@@ -1,408 +1,408 b''
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import os, copy
10 from mercurial import match, patch, scmutil, error, ui, util
10 from mercurial import match, patch, error, ui, util, pathutil
11 11 from mercurial.i18n import _
12 12 from mercurial.node import hex, nullid
13 13 from common import ErrorResponse
14 14 from common import HTTP_NOT_FOUND
15 15 import difflib
16 16
17 17 def up(p):
18 18 if p[0] != "/":
19 19 p = "/" + p
20 20 if p[-1] == "/":
21 21 p = p[:-1]
22 22 up = os.path.dirname(p)
23 23 if up == "/":
24 24 return "/"
25 25 return up + "/"
26 26
27 27 def _navseq(step, firststep=None):
28 28 if firststep:
29 29 yield firststep
30 30 if firststep >= 20 and firststep <= 40:
31 31 firststep = 50
32 32 yield firststep
33 33 assert step > 0
34 34 assert firststep > 0
35 35 while step <= firststep:
36 36 step *= 10
37 37 while True:
38 38 yield 1 * step
39 39 yield 3 * step
40 40 step *= 10
41 41
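For a feel of the offsets _navseq yields, a quick probe (assumes the function above is importable):

    import itertools

    print(list(itertools.islice(_navseq(1, 10), 6)))
    # [10, 100, 300, 1000, 3000, 10000]
    print(list(itertools.islice(_navseq(1, 25), 6)))
    # [25, 50, 100, 300, 1000, 3000] -- a firststep in 20..40 also emits 50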
42 42 class revnav(object):
43 43
44 44 def __init__(self, repo):
45 45 """Navigation generation object
46 46
47 47 :repo: repo object we generate nav for
48 48 """
49 49 # used for hex generation
50 50 self._revlog = repo.changelog
51 51
52 52 def __nonzero__(self):
53 53 """return True if any revision to navigate over"""
54 54 return self._first() is not None
55 55
56 56 def _first(self):
57 57 """return the minimum non-filtered changeset or None"""
58 58 try:
59 59 return iter(self._revlog).next()
60 60 except StopIteration:
61 61 return None
62 62
63 63 def hex(self, rev):
64 64 return hex(self._revlog.node(rev))
65 65
66 66 def gen(self, pos, pagelen, limit):
67 67 """computes label and revision id for navigation link
68 68
69 69 :pos: is the revision relative to which we generate navigation.
70 70 :pagelen: the size of each navigation page
71 71 :limit: how far we may link
72 72 
73 73 The return value is:
74 74 - a single-element tuple
75 75 - containing a dictionary with `before` and `after` keys
76 76 - whose values are generator functions taking an arbitrary number of kwargs
77 77 - each yielding dictionaries with `label` and `node` keys
78 78 """
79 79 if not self:
80 80 # empty repo
81 81 return ({'before': (), 'after': ()},)
82 82
83 83 targets = []
84 84 for f in _navseq(1, pagelen):
85 85 if f > limit:
86 86 break
87 87 targets.append(pos + f)
88 88 targets.append(pos - f)
89 89 targets.sort()
90 90
91 91 first = self._first()
92 92 navbefore = [("(%i)" % first, self.hex(first))]
93 93 navafter = []
94 94 for rev in targets:
95 95 if rev not in self._revlog:
96 96 continue
97 97 if pos < rev < limit:
98 98 navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
99 99 if 0 < rev < pos:
100 100 navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))
101 101
102 102
103 103 navafter.append(("tip", "tip"))
104 104
105 105 data = lambda i: {"label": i[0], "node": i[1]}
106 106 return ({'before': lambda **map: (data(i) for i in navbefore),
107 107 'after': lambda **map: (data(i) for i in navafter)},)
108 108
109 109 class filerevnav(revnav):
110 110
111 111 def __init__(self, repo, path):
112 112 """Navigation generation object
113 113
114 114 :repo: repo object we generate nav for
115 115 :path: path of the file we generate nav for
116 116 """
117 117 # used for iteration
118 118 self._changelog = repo.unfiltered().changelog
119 119 # used for hex generation
120 120 self._revlog = repo.file(path)
121 121
122 122 def hex(self, rev):
123 123 return hex(self._changelog.node(self._revlog.linkrev(rev)))
124 124
125 125
126 126 def _siblings(siblings=[], hiderev=None):
127 127 siblings = [s for s in siblings if s.node() != nullid]
128 128 if len(siblings) == 1 and siblings[0].rev() == hiderev:
129 129 return
130 130 for s in siblings:
131 131 d = {'node': s.hex(), 'rev': s.rev()}
132 132 d['user'] = s.user()
133 133 d['date'] = s.date()
134 134 d['description'] = s.description()
135 135 d['branch'] = s.branch()
136 136 if util.safehasattr(s, 'path'):
137 137 d['file'] = s.path()
138 138 yield d
139 139
140 140 def parents(ctx, hide=None):
141 141 return _siblings(ctx.parents(), hide)
142 142
143 143 def children(ctx, hide=None):
144 144 return _siblings(ctx.children(), hide)
145 145
146 146 def renamelink(fctx):
147 147 r = fctx.renamed()
148 148 if r:
149 149 return [dict(file=r[0], node=hex(r[1]))]
150 150 return []
151 151
152 152 def nodetagsdict(repo, node):
153 153 return [{"name": i} for i in repo.nodetags(node)]
154 154
155 155 def nodebookmarksdict(repo, node):
156 156 return [{"name": i} for i in repo.nodebookmarks(node)]
157 157
158 158 def nodebranchdict(repo, ctx):
159 159 branches = []
160 160 branch = ctx.branch()
161 161 # If this is an empty repo, ctx.node() == nullid,
162 162 # ctx.branch() == 'default'.
163 163 try:
164 164 branchnode = repo.branchtip(branch)
165 165 except error.RepoLookupError:
166 166 branchnode = None
167 167 if branchnode == ctx.node():
168 168 branches.append({"name": branch})
169 169 return branches
170 170
171 171 def nodeinbranch(repo, ctx):
172 172 branches = []
173 173 branch = ctx.branch()
174 174 try:
175 175 branchnode = repo.branchtip(branch)
176 176 except error.RepoLookupError:
177 177 branchnode = None
178 178 if branch != 'default' and branchnode != ctx.node():
179 179 branches.append({"name": branch})
180 180 return branches
181 181
182 182 def nodebranchnodefault(ctx):
183 183 branches = []
184 184 branch = ctx.branch()
185 185 if branch != 'default':
186 186 branches.append({"name": branch})
187 187 return branches
188 188
189 189 def showtag(repo, tmpl, t1, node=nullid, **args):
190 190 for t in repo.nodetags(node):
191 191 yield tmpl(t1, tag=t, **args)
192 192
193 193 def showbookmark(repo, tmpl, t1, node=nullid, **args):
194 194 for t in repo.nodebookmarks(node):
195 195 yield tmpl(t1, bookmark=t, **args)
196 196
197 197 def cleanpath(repo, path):
198 198 path = path.lstrip('/')
199 return scmutil.canonpath(repo.root, '', path)
199 return pathutil.canonpath(repo.root, '', path)
200 200
201 201 def changeidctx (repo, changeid):
202 202 try:
203 203 ctx = repo[changeid]
204 204 except error.RepoError:
205 205 man = repo.manifest
206 206 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
207 207
208 208 return ctx
209 209
210 210 def changectx (repo, req):
211 211 changeid = "tip"
212 212 if 'node' in req.form:
213 213 changeid = req.form['node'][0]
214 214 ipos=changeid.find(':')
215 215 if ipos != -1:
216 216 changeid = changeid[(ipos + 1):]
217 217 elif 'manifest' in req.form:
218 218 changeid = req.form['manifest'][0]
219 219
220 220 return changeidctx(repo, changeid)
221 221
222 222 def basechangectx(repo, req):
223 223 if 'node' in req.form:
224 224 changeid = req.form['node'][0]
225 225 ipos=changeid.find(':')
226 226 if ipos != -1:
227 227 changeid = changeid[:ipos]
228 228 return changeidctx(repo, changeid)
229 229
230 230 return None
231 231
232 232 def filectx(repo, req):
233 233 if 'file' not in req.form:
234 234 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
235 235 path = cleanpath(repo, req.form['file'][0])
236 236 if 'node' in req.form:
237 237 changeid = req.form['node'][0]
238 238 elif 'filenode' in req.form:
239 239 changeid = req.form['filenode'][0]
240 240 else:
241 241 raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
242 242 try:
243 243 fctx = repo[changeid][path]
244 244 except error.RepoError:
245 245 fctx = repo.filectx(path, fileid=changeid)
246 246
247 247 return fctx
248 248
249 249 def listfilediffs(tmpl, files, node, max):
250 250 for f in files[:max]:
251 251 yield tmpl('filedifflink', node=hex(node), file=f)
252 252 if len(files) > max:
253 253 yield tmpl('fileellipses')
254 254
255 255 def diffs(repo, tmpl, ctx, basectx, files, parity, style):
256 256
257 257 def countgen():
258 258 start = 1
259 259 while True:
260 260 yield start
261 261 start += 1
262 262
263 263 blockcount = countgen()
264 264 def prettyprintlines(diff, blockno):
265 265 for lineno, l in enumerate(diff.splitlines(True)):
266 266 lineno = "%d.%d" % (blockno, lineno + 1)
267 267 if l.startswith('+'):
268 268 ltype = "difflineplus"
269 269 elif l.startswith('-'):
270 270 ltype = "difflineminus"
271 271 elif l.startswith('@'):
272 272 ltype = "difflineat"
273 273 else:
274 274 ltype = "diffline"
275 275 yield tmpl(ltype,
276 276 line=l,
277 277 lineid="l%s" % lineno,
278 278 linenumber="% 8s" % lineno)
279 279
280 280 if files:
281 281 m = match.exact(repo.root, repo.getcwd(), files)
282 282 else:
283 283 m = match.always(repo.root, repo.getcwd())
284 284
285 285 diffopts = patch.diffopts(repo.ui, untrusted=True)
286 286 if basectx is None:
287 287 parents = ctx.parents()
288 288 node1 = parents and parents[0].node() or nullid
289 289 else:
290 290 node1 = basectx.node()
291 291 node2 = ctx.node()
292 292
293 293 block = []
294 294 for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
295 295 if chunk.startswith('diff') and block:
296 296 blockno = blockcount.next()
297 297 yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
298 298 lines=prettyprintlines(''.join(block), blockno))
299 299 block = []
300 300 if chunk.startswith('diff') and style != 'raw':
301 301 chunk = ''.join(chunk.splitlines(True)[1:])
302 302 block.append(chunk)
303 303 blockno = blockcount.next()
304 304 yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
305 305 lines=prettyprintlines(''.join(block), blockno))
306 306
307 307 def compare(tmpl, context, leftlines, rightlines):
308 308 '''Generator function that provides side-by-side comparison data.'''
309 309
310 310 def compline(type, leftlineno, leftline, rightlineno, rightline):
311 311 lineid = leftlineno and ("l%s" % leftlineno) or ''
312 312 lineid += rightlineno and ("r%s" % rightlineno) or ''
313 313 return tmpl('comparisonline',
314 314 type=type,
315 315 lineid=lineid,
316 316 leftlinenumber="% 6s" % (leftlineno or ''),
317 317 leftline=leftline or '',
318 318 rightlinenumber="% 6s" % (rightlineno or ''),
319 319 rightline=rightline or '')
320 320
321 321 def getblock(opcodes):
322 322 for type, llo, lhi, rlo, rhi in opcodes:
323 323 len1 = lhi - llo
324 324 len2 = rhi - rlo
325 325 count = min(len1, len2)
326 326 for i in xrange(count):
327 327 yield compline(type=type,
328 328 leftlineno=llo + i + 1,
329 329 leftline=leftlines[llo + i],
330 330 rightlineno=rlo + i + 1,
331 331 rightline=rightlines[rlo + i])
332 332 if len1 > len2:
333 333 for i in xrange(llo + count, lhi):
334 334 yield compline(type=type,
335 335 leftlineno=i + 1,
336 336 leftline=leftlines[i],
337 337 rightlineno=None,
338 338 rightline=None)
339 339 elif len2 > len1:
340 340 for i in xrange(rlo + count, rhi):
341 341 yield compline(type=type,
342 342 leftlineno=None,
343 343 leftline=None,
344 344 rightlineno=i + 1,
345 345 rightline=rightlines[i])
346 346
347 347 s = difflib.SequenceMatcher(None, leftlines, rightlines)
348 348 if context < 0:
349 349 yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
350 350 else:
351 351 for oc in s.get_grouped_opcodes(n=context):
352 352 yield tmpl('comparisonblock', lines=getblock(oc))
353 353
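compare() is driven entirely by difflib opcodes; a standalone look at what SequenceMatcher hands to getblock():

    import difflib

    left = ['a\n', 'b\n', 'c\n']
    right = ['a\n', 'x\n', 'c\n']
    sm = difflib.SequenceMatcher(None, left, right)
    for tag, llo, lhi, rlo, rhi in sm.get_opcodes():
        print((tag, left[llo:lhi], right[rlo:rhi]))
    # ('equal', ['a\n'], ['a\n'])
    # ('replace', ['b\n'], ['x\n'])
    # ('equal', ['c\n'], ['c\n'])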
354 354 def diffstatgen(ctx, basectx):
355 355 '''Generator function that provides the diffstat data.'''
356 356
357 357 stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
358 358 maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
359 359 while True:
360 360 yield stats, maxname, maxtotal, addtotal, removetotal, binary
361 361
362 362 def diffsummary(statgen):
363 363 '''Return a short summary of the diff.'''
364 364
365 365 stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
366 366 return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
367 367 len(stats), addtotal, removetotal)
368 368
369 369 def diffstat(tmpl, ctx, statgen, parity):
370 370 '''Return a diffstat template for each file in the diff.'''
371 371
372 372 stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
373 373 files = ctx.files()
374 374
375 375 def pct(i):
376 376 if maxtotal == 0:
377 377 return 0
378 378 return (float(i) / maxtotal) * 100
379 379
380 380 fileno = 0
381 381 for filename, adds, removes, isbinary in stats:
382 382 template = filename in files and 'diffstatlink' or 'diffstatnolink'
383 383 total = adds + removes
384 384 fileno += 1
385 385 yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
386 386 total=total, addpct=pct(adds), removepct=pct(removes),
387 387 parity=parity.next())
388 388
389 389 class sessionvars(object):
390 390 def __init__(self, vars, start='?'):
391 391 self.start = start
392 392 self.vars = vars
393 393 def __getitem__(self, key):
394 394 return self.vars[key]
395 395 def __setitem__(self, key, value):
396 396 self.vars[key] = value
397 397 def __copy__(self):
398 398 return sessionvars(copy.copy(self.vars), self.start)
399 399 def __iter__(self):
400 400 separator = self.start
401 401 for key, value in sorted(self.vars.iteritems()):
402 402 yield {'name': key, 'value': str(value), 'separator': separator}
403 403 separator = '&'
404 404
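sessionvars renders URL query parameters: iteration sorts by name and swaps the separator after the first item. A small usage probe of the class above (Python 2, matching this codebase):

    s = sessionvars({'style': 'paper', 'rev': 'tip'})
    for item in s:
        print('%(separator)s%(name)s=%(value)s' % item)
    # ?rev=tip
    # &style=paper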
405 405 class wsgiui(ui.ui):
406 406 # default termwidth breaks under mod_wsgi
407 407 def termwidth(self):
408 408 return 80
@@ -1,2461 +1,2461 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap, pathutil
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 unfi = repo.unfiltered()
43 43 if unfi is repo:
44 44 return super(unfilteredpropertycache, self).__get__(unfi)
45 45 return getattr(unfi, self.name)
46 46
47 47 class filteredpropertycache(propertycache):
48 48 """propertycache that must take filtering in account"""
49 49
50 50 def cachevalue(self, obj, value):
51 51 object.__setattr__(obj, self.name, value)
52 52
53 53
54 54 def hasunfilteredcache(repo, name):
55 55 """check if a repo has an unfilteredpropertycache value for <name>"""
56 56 return name in vars(repo.unfiltered())
57 57
58 58 def unfilteredmethod(orig):
59 59 """decorate method that always need to be run on unfiltered version"""
60 60 def wrapper(repo, *args, **kwargs):
61 61 return orig(repo.unfiltered(), *args, **kwargs)
62 62 return wrapper
63 63
64 64 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
65 65 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
66 66
67 67 class localpeer(peer.peerrepository):
68 68 '''peer for a local repo; reflects only the most recent API'''
69 69
70 70 def __init__(self, repo, caps=MODERNCAPS):
71 71 peer.peerrepository.__init__(self)
72 72 self._repo = repo.filtered('served')
73 73 self.ui = repo.ui
74 74 self._caps = repo._restrictcapabilities(caps)
75 75 self.requirements = repo.requirements
76 76 self.supportedformats = repo.supportedformats
77 77
78 78 def close(self):
79 79 self._repo.close()
80 80
81 81 def _capabilities(self):
82 82 return self._caps
83 83
84 84 def local(self):
85 85 return self._repo
86 86
87 87 def canpush(self):
88 88 return True
89 89
90 90 def url(self):
91 91 return self._repo.url()
92 92
93 93 def lookup(self, key):
94 94 return self._repo.lookup(key)
95 95
96 96 def branchmap(self):
97 97 return self._repo.branchmap()
98 98
99 99 def heads(self):
100 100 return self._repo.heads()
101 101
102 102 def known(self, nodes):
103 103 return self._repo.known(nodes)
104 104
105 105 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
106 106 return self._repo.getbundle(source, heads=heads, common=common,
107 107 bundlecaps=None)
108 108
109 109 # TODO We might want to move the next two calls into legacypeer and add
110 110 # unbundle instead.
111 111
112 112 def lock(self):
113 113 return self._repo.lock()
114 114
115 115 def addchangegroup(self, cg, source, url):
116 116 return self._repo.addchangegroup(cg, source, url)
117 117
118 118 def pushkey(self, namespace, key, old, new):
119 119 return self._repo.pushkey(namespace, key, old, new)
120 120
121 121 def listkeys(self, namespace):
122 122 return self._repo.listkeys(namespace)
123 123
124 124 def debugwireargs(self, one, two, three=None, four=None, five=None):
125 125 '''used to test argument passing over the wire'''
126 126 return "%s %s %s %s %s" % (one, two, three, four, five)
127 127
128 128 class locallegacypeer(localpeer):
129 129 '''peer extension which implements legacy methods too; used for tests with
130 130 restricted capabilities'''
131 131
132 132 def __init__(self, repo):
133 133 localpeer.__init__(self, repo, caps=LEGACYCAPS)
134 134
135 135 def branches(self, nodes):
136 136 return self._repo.branches(nodes)
137 137
138 138 def between(self, pairs):
139 139 return self._repo.between(pairs)
140 140
141 141 def changegroup(self, basenodes, source):
142 142 return self._repo.changegroup(basenodes, source)
143 143
144 144 def changegroupsubset(self, bases, heads, source):
145 145 return self._repo.changegroupsubset(bases, heads, source)
146 146
147 147 class localrepository(object):
148 148
149 149 supportedformats = set(('revlogv1', 'generaldelta'))
150 150 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
151 151 'dotencode'))
152 152 openerreqs = set(('revlogv1', 'generaldelta'))
153 153 requirements = ['revlogv1']
154 154 filtername = None
155 155
156 156 # a list of (ui, featureset) functions.
157 157 # only functions defined in module of enabled extensions are invoked
158 158 featuresetupfuncs = set()
159 159
160 160 def _baserequirements(self, create):
161 161 return self.requirements[:]
162 162
163 163 def __init__(self, baseui, path=None, create=False):
164 164 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
165 165 self.wopener = self.wvfs
166 166 self.root = self.wvfs.base
167 167 self.path = self.wvfs.join(".hg")
168 168 self.origroot = path
169 self.auditor = scmutil.pathauditor(self.root, self._checknested)
169 self.auditor = pathutil.pathauditor(self.root, self._checknested)
170 170 self.vfs = scmutil.vfs(self.path)
171 171 self.opener = self.vfs
172 172 self.baseui = baseui
173 173 self.ui = baseui.copy()
174 174 # A list of callback to shape the phase if no data were found.
175 175 # Callback are in the form: func(repo, roots) --> processed root.
176 176 # This list it to be filled by extension during repo setup
177 177 self._phasedefaults = []
178 178 try:
179 179 self.ui.readconfig(self.join("hgrc"), self.root)
180 180 extensions.loadall(self.ui)
181 181 except IOError:
182 182 pass
183 183
184 184 if self.featuresetupfuncs:
185 185 self.supported = set(self._basesupported) # use private copy
186 186 extmods = set(m.__name__ for n, m
187 187 in extensions.extensions(self.ui))
188 188 for setupfunc in self.featuresetupfuncs:
189 189 if setupfunc.__module__ in extmods:
190 190 setupfunc(self.ui, self.supported)
191 191 else:
192 192 self.supported = self._basesupported
193 193
194 194 if not self.vfs.isdir():
195 195 if create:
196 196 if not self.wvfs.exists():
197 197 self.wvfs.makedirs()
198 198 self.vfs.makedir(notindexed=True)
199 199 requirements = self._baserequirements(create)
200 200 if self.ui.configbool('format', 'usestore', True):
201 201 self.vfs.mkdir("store")
202 202 requirements.append("store")
203 203 if self.ui.configbool('format', 'usefncache', True):
204 204 requirements.append("fncache")
205 205 if self.ui.configbool('format', 'dotencode', True):
206 206 requirements.append('dotencode')
207 207 # create an invalid changelog
208 208 self.vfs.append(
209 209 "00changelog.i",
210 210 '\0\0\0\2' # represents revlogv2
211 211 ' dummy changelog to prevent using the old repo layout'
212 212 )
213 213 if self.ui.configbool('format', 'generaldelta', False):
214 214 requirements.append("generaldelta")
215 215 requirements = set(requirements)
216 216 else:
217 217 raise error.RepoError(_("repository %s not found") % path)
218 218 elif create:
219 219 raise error.RepoError(_("repository %s already exists") % path)
220 220 else:
221 221 try:
222 222 requirements = scmutil.readrequires(self.vfs, self.supported)
223 223 except IOError, inst:
224 224 if inst.errno != errno.ENOENT:
225 225 raise
226 226 requirements = set()
227 227
228 228 self.sharedpath = self.path
229 229 try:
230 230 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
231 231 realpath=True)
232 232 s = vfs.base
233 233 if not vfs.exists():
234 234 raise error.RepoError(
235 235 _('.hg/sharedpath points to nonexistent directory %s') % s)
236 236 self.sharedpath = s
237 237 except IOError, inst:
238 238 if inst.errno != errno.ENOENT:
239 239 raise
240 240
241 241 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
242 242 self.spath = self.store.path
243 243 self.svfs = self.store.vfs
244 244 self.sopener = self.svfs
245 245 self.sjoin = self.store.join
246 246 self.vfs.createmode = self.store.createmode
247 247 self._applyrequirements(requirements)
248 248 if create:
249 249 self._writerequirements()
250 250
251 251
252 252 self._branchcaches = {}
253 253 self.filterpats = {}
254 254 self._datafilters = {}
255 255 self._transref = self._lockref = self._wlockref = None
256 256
257 257 # A cache for various files under .hg/ that tracks file changes,
258 258 # (used by the filecache decorator)
259 259 #
260 260 # Maps a property name to its util.filecacheentry
261 261 self._filecache = {}
262 262
263 263 # hold sets of revision to be filtered
264 264 # should be cleared when something might have changed the filter value:
265 265 # - new changesets,
266 266 # - phase change,
267 267 # - new obsolescence marker,
268 268 # - working directory parent change,
269 269 # - bookmark changes
270 270 self.filteredrevcache = {}
271 271
272 272 def close(self):
273 273 pass
274 274
275 275 def _restrictcapabilities(self, caps):
276 276 return caps
277 277
278 278 def _applyrequirements(self, requirements):
279 279 self.requirements = requirements
280 280 self.sopener.options = dict((r, 1) for r in requirements
281 281 if r in self.openerreqs)
282 282
283 283 def _writerequirements(self):
284 284 reqfile = self.opener("requires", "w")
285 285 for r in sorted(self.requirements):
286 286 reqfile.write("%s\n" % r)
287 287 reqfile.close()
288 288
289 289 def _checknested(self, path):
290 290 """Determine if path is a legal nested repository."""
291 291 if not path.startswith(self.root):
292 292 return False
293 293 subpath = path[len(self.root) + 1:]
294 294 normsubpath = util.pconvert(subpath)
295 295
296 296 # XXX: Checking against the current working copy is wrong in
297 297 # the sense that it can reject things like
298 298 #
299 299 # $ hg cat -r 10 sub/x.txt
300 300 #
301 301 # if sub/ is no longer a subrepository in the working copy
302 302 # parent revision.
303 303 #
304 304 # However, it can of course also allow things that would have
305 305 # been rejected before, such as the above cat command if sub/
306 306 # is a subrepository now, but was a normal directory before.
307 307 # The old path auditor would have rejected by mistake since it
308 308 # panics when it sees sub/.hg/.
309 309 #
310 310 # All in all, checking against the working copy seems sensible
311 311 # since we want to prevent access to nested repositories on
312 312 # the filesystem *now*.
313 313 ctx = self[None]
314 314 parts = util.splitpath(subpath)
315 315 while parts:
316 316 prefix = '/'.join(parts)
317 317 if prefix in ctx.substate:
318 318 if prefix == normsubpath:
319 319 return True
320 320 else:
321 321 sub = ctx.sub(prefix)
322 322 return sub.checknested(subpath[len(prefix) + 1:])
323 323 else:
324 324 parts.pop()
325 325 return False
326 326
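The loop in _checknested walks candidate prefixes from longest to shortest before giving up. An illustrative stand-in for that walk (splitting on '/', whereas the real code goes through util.splitpath):

    def substateprefixes(subpath):
        # Candidate prefixes tested against ctx.substate, longest first.
        parts = subpath.split('/')
        while parts:
            yield '/'.join(parts)
            parts.pop()

    # list(substateprefixes('sub/deeper/f'))
    # -> ['sub/deeper/f', 'sub/deeper', 'sub']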
327 327 def peer(self):
328 328 return localpeer(self) # not cached to avoid reference cycle
329 329
330 330 def unfiltered(self):
331 331 """Return unfiltered version of the repository
332 332
333 333 Intended to be overwritten by filtered repo."""
334 334 return self
335 335
336 336 def filtered(self, name):
337 337 """Return a filtered version of a repository"""
338 338 # build a new class with the mixin and the current class
339 339 # (possibly subclass of the repo)
340 340 class proxycls(repoview.repoview, self.unfiltered().__class__):
341 341 pass
342 342 return proxycls(self, name)
343 343
344 344 @repofilecache('bookmarks')
345 345 def _bookmarks(self):
346 346 return bookmarks.bmstore(self)
347 347
348 348 @repofilecache('bookmarks.current')
349 349 def _bookmarkcurrent(self):
350 350 return bookmarks.readcurrent(self)
351 351
352 352 def bookmarkheads(self, bookmark):
353 353 name = bookmark.split('@', 1)[0]
354 354 heads = []
355 355 for mark, n in self._bookmarks.iteritems():
356 356 if mark.split('@', 1)[0] == name:
357 357 heads.append(n)
358 358 return heads
359 359
360 360 @storecache('phaseroots')
361 361 def _phasecache(self):
362 362 return phases.phasecache(self, self._phasedefaults)
363 363
364 364 @storecache('obsstore')
365 365 def obsstore(self):
366 366 store = obsolete.obsstore(self.sopener)
367 367 if store and not obsolete._enabled:
368 368 # message is rare enough to not be translated
369 369 msg = 'obsolete feature not enabled but %i markers found!\n'
370 370 self.ui.warn(msg % len(list(store)))
371 371 return store
372 372
373 373 @storecache('00changelog.i')
374 374 def changelog(self):
375 375 c = changelog.changelog(self.sopener)
376 376 if 'HG_PENDING' in os.environ:
377 377 p = os.environ['HG_PENDING']
378 378 if p.startswith(self.root):
379 379 c.readpending('00changelog.i.a')
380 380 return c
381 381
382 382 @storecache('00manifest.i')
383 383 def manifest(self):
384 384 return manifest.manifest(self.sopener)
385 385
386 386 @repofilecache('dirstate')
387 387 def dirstate(self):
388 388 warned = [0]
389 389 def validate(node):
390 390 try:
391 391 self.changelog.rev(node)
392 392 return node
393 393 except error.LookupError:
394 394 if not warned[0]:
395 395 warned[0] = True
396 396 self.ui.warn(_("warning: ignoring unknown"
397 397 " working parent %s!\n") % short(node))
398 398 return nullid
399 399
400 400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 401
402 402 def __getitem__(self, changeid):
403 403 if changeid is None:
404 404 return context.workingctx(self)
405 405 return context.changectx(self, changeid)
406 406
407 407 def __contains__(self, changeid):
408 408 try:
409 409 return bool(self.lookup(changeid))
410 410 except error.RepoLookupError:
411 411 return False
412 412
413 413 def __nonzero__(self):
414 414 return True
415 415
416 416 def __len__(self):
417 417 return len(self.changelog)
418 418
419 419 def __iter__(self):
420 420 return iter(self.changelog)
421 421
422 422 def revs(self, expr, *args):
423 423 '''Return a list of revisions matching the given revset'''
424 424 expr = revset.formatspec(expr, *args)
425 425 m = revset.match(None, expr)
426 426 return [r for r in m(self, list(self))]
427 427
428 428 def set(self, expr, *args):
429 429 '''
430 430 Yield a context for each matching revision, after doing arg
431 431 replacement via revset.formatspec
432 432 '''
433 433 for r in self.revs(expr, *args):
434 434 yield self[r]
435 435
436 436 def url(self):
437 437 return 'file:' + self.root
438 438
439 439 def hook(self, name, throw=False, **args):
440 440 return hook.hook(self.ui, self, name, throw, **args)
441 441
442 442 @unfilteredmethod
443 443 def _tag(self, names, node, message, local, user, date, extra={}):
444 444 if isinstance(names, str):
445 445 names = (names,)
446 446
447 447 branches = self.branchmap()
448 448 for name in names:
449 449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 450 local=local)
451 451 if name in branches:
452 452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 453 " branch name\n") % name)
454 454
455 455 def writetags(fp, names, munge, prevtags):
456 456 fp.seek(0, 2)
457 457 if prevtags and prevtags[-1] != '\n':
458 458 fp.write('\n')
459 459 for name in names:
460 460 m = munge and munge(name) or name
461 461 if (self._tagscache.tagtypes and
462 462 name in self._tagscache.tagtypes):
463 463 old = self.tags().get(name, nullid)
464 464 fp.write('%s %s\n' % (hex(old), m))
465 465 fp.write('%s %s\n' % (hex(node), m))
466 466 fp.close()
467 467
468 468 prevtags = ''
469 469 if local:
470 470 try:
471 471 fp = self.opener('localtags', 'r+')
472 472 except IOError:
473 473 fp = self.opener('localtags', 'a')
474 474 else:
475 475 prevtags = fp.read()
476 476
477 477 # local tags are stored in the current charset
478 478 writetags(fp, names, None, prevtags)
479 479 for name in names:
480 480 self.hook('tag', node=hex(node), tag=name, local=local)
481 481 return
482 482
483 483 try:
484 484 fp = self.wfile('.hgtags', 'rb+')
485 485 except IOError, e:
486 486 if e.errno != errno.ENOENT:
487 487 raise
488 488 fp = self.wfile('.hgtags', 'ab')
489 489 else:
490 490 prevtags = fp.read()
491 491
492 492 # committed tags are stored in UTF-8
493 493 writetags(fp, names, encoding.fromlocal, prevtags)
494 494
495 495 fp.close()
496 496
497 497 self.invalidatecaches()
498 498
499 499 if '.hgtags' not in self.dirstate:
500 500 self[None].add(['.hgtags'])
501 501
502 502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 504
505 505 for name in names:
506 506 self.hook('tag', node=hex(node), tag=name, local=local)
507 507
508 508 return tagnode
509 509
510 510 def tag(self, names, node, message, local, user, date):
511 511 '''tag a revision with one or more symbolic names.
512 512
513 513 names is a list of strings or, when adding a single tag, names may be a
514 514 string.
515 515
516 516 if local is True, the tags are stored in a per-repository file.
517 517 otherwise, they are stored in the .hgtags file, and a new
518 518 changeset is committed with the change.
519 519
520 520 keyword arguments:
521 521
522 522 local: whether to store tags in non-version-controlled file
523 523 (default False)
524 524
525 525 message: commit message to use if committing
526 526
527 527 user: name of user to use if committing
528 528
529 529 date: date tuple to use if committing'''
530 530
531 531 if not local:
532 532 for x in self.status()[:5]:
533 533 if '.hgtags' in x:
534 534 raise util.Abort(_('working copy of .hgtags is changed '
535 535 '(please commit .hgtags manually)'))
536 536
537 537 self.tags() # instantiate the cache
538 538 self._tag(names, node, message, local, user, date)
539 539
540 540 @filteredpropertycache
541 541 def _tagscache(self):
542 542 '''Returns a tagscache object that contains various tags related
543 543 caches.'''
544 544
545 545 # This simplifies its cache management by having one decorated
546 546 # function (this one) and the rest simply fetch things from it.
547 547 class tagscache(object):
548 548 def __init__(self):
549 549 # These two define the set of tags for this repository. tags
550 550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 551 # 'local'. (Global tags are defined by .hgtags across all
552 552 # heads, and local tags are defined in .hg/localtags.)
553 553 # They constitute the in-memory cache of tags.
554 554 self.tags = self.tagtypes = None
555 555
556 556 self.nodetagscache = self.tagslist = None
557 557
558 558 cache = tagscache()
559 559 cache.tags, cache.tagtypes = self._findtags()
560 560
561 561 return cache
562 562
563 563 def tags(self):
564 564 '''return a mapping of tag to node'''
565 565 t = {}
566 566 if self.changelog.filteredrevs:
567 567 tags, tt = self._findtags()
568 568 else:
569 569 tags = self._tagscache.tags
570 570 for k, v in tags.iteritems():
571 571 try:
572 572 # ignore tags to unknown nodes
573 573 self.changelog.rev(v)
574 574 t[k] = v
575 575 except (error.LookupError, ValueError):
576 576 pass
577 577 return t
578 578
579 579 def _findtags(self):
580 580 '''Do the hard work of finding tags. Return a pair of dicts
581 581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 582 maps tag name to a string like \'global\' or \'local\'.
583 583 Subclasses or extensions are free to add their own tags, but
584 584 should be aware that the returned dicts will be retained for the
585 585 duration of the localrepo object.'''
586 586
587 587 # XXX what tagtype should subclasses/extensions use? Currently
588 588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 589 # Should each extension invent its own tag type? Should there
590 590 # be one tagtype for all such "virtual" tags? Or is the status
591 591 # quo fine?
592 592
593 593 alltags = {} # map tag name to (node, hist)
594 594 tagtypes = {}
595 595
596 596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 598
599 599 # Build the return dicts. Have to re-encode tag names because
600 600 # the tags module always uses UTF-8 (in order not to lose info
601 601 # writing to the cache), but the rest of Mercurial wants them in
602 602 # local encoding.
603 603 tags = {}
604 604 for (name, (node, hist)) in alltags.iteritems():
605 605 if node != nullid:
606 606 tags[encoding.tolocal(name)] = node
607 607 tags['tip'] = self.changelog.tip()
608 608 tagtypes = dict([(encoding.tolocal(name), value)
609 609 for (name, value) in tagtypes.iteritems()])
610 610 return (tags, tagtypes)
611 611
612 612 def tagtype(self, tagname):
613 613 '''
614 614 return the type of the given tag. result can be:
615 615
616 616 'local' : a local tag
617 617 'global' : a global tag
618 618 None : tag does not exist
619 619 '''
620 620
621 621 return self._tagscache.tagtypes.get(tagname)
622 622
623 623 def tagslist(self):
624 624 '''return a list of tags ordered by revision'''
625 625 if not self._tagscache.tagslist:
626 626 l = []
627 627 for t, n in self.tags().iteritems():
628 628 r = self.changelog.rev(n)
629 629 l.append((r, t, n))
630 630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 631
632 632 return self._tagscache.tagslist
633 633
634 634 def nodetags(self, node):
635 635 '''return the tags associated with a node'''
636 636 if not self._tagscache.nodetagscache:
637 637 nodetagscache = {}
638 638 for t, n in self._tagscache.tags.iteritems():
639 639 nodetagscache.setdefault(n, []).append(t)
640 640 for tags in nodetagscache.itervalues():
641 641 tags.sort()
642 642 self._tagscache.nodetagscache = nodetagscache
643 643 return self._tagscache.nodetagscache.get(node, [])
644 644
645 645 def nodebookmarks(self, node):
646 646 marks = []
647 647 for bookmark, n in self._bookmarks.iteritems():
648 648 if n == node:
649 649 marks.append(bookmark)
650 650 return sorted(marks)
651 651
652 652 def branchmap(self):
653 653 '''returns a dictionary {branch: [branchheads]}'''
654 654 branchmap.updatecache(self)
655 655 return self._branchcaches[self.filtername]
656 656
657 657
658 658 def _branchtip(self, heads):
659 659 '''return the tipmost branch head in heads'''
660 660 tip = heads[-1]
661 661 for h in reversed(heads):
662 662 if not self[h].closesbranch():
663 663 tip = h
664 664 break
665 665 return tip
666 666
667 667 def branchtip(self, branch):
668 668 '''return the tip node for a given branch'''
669 669 if branch not in self.branchmap():
670 670 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
671 671 return self._branchtip(self.branchmap()[branch])
672 672
673 673 def branchtags(self):
674 674 '''return a dict where branch names map to the tipmost head of
675 675 the branch, open heads come before closed'''
676 676 bt = {}
677 677 for bn, heads in self.branchmap().iteritems():
678 678 bt[bn] = self._branchtip(heads)
679 679 return bt
680 680
681 681 def lookup(self, key):
682 682 return self[key].node()
683 683
684 684 def lookupbranch(self, key, remote=None):
685 685 repo = remote or self
686 686 if key in repo.branchmap():
687 687 return key
688 688
689 689 repo = (remote and remote.local()) and remote or self
690 690 return repo[key].branch()
691 691
692 692 def known(self, nodes):
693 693 nm = self.changelog.nodemap
694 694 pc = self._phasecache
695 695 result = []
696 696 for n in nodes:
697 697 r = nm.get(n)
698 698 resp = not (r is None or pc.phase(self, r) >= phases.secret)
699 699 result.append(resp)
700 700 return result
701 701
702 702 def local(self):
703 703 return self
704 704
705 705 def cancopy(self):
706 706 return self.local() # so statichttprepo's override of local() works
707 707
708 708 def join(self, f):
709 709 return os.path.join(self.path, f)
710 710
711 711 def wjoin(self, f):
712 712 return os.path.join(self.root, f)
713 713
714 714 def file(self, f):
715 715 if f[0] == '/':
716 716 f = f[1:]
717 717 return filelog.filelog(self.sopener, f)
718 718
719 719 def changectx(self, changeid):
720 720 return self[changeid]
721 721
722 722 def parents(self, changeid=None):
723 723 '''get list of changectxs for parents of changeid'''
724 724 return self[changeid].parents()
725 725
726 726 def setparents(self, p1, p2=nullid):
727 727 copies = self.dirstate.setparents(p1, p2)
728 728 pctx = self[p1]
729 729 if copies:
730 730 # Adjust copy records; the dirstate cannot do it itself, as it
731 731 # requires access to the parents' manifests. Preserve them
732 732 # only for entries added to the first parent.
733 733 for f in copies:
734 734 if f not in pctx and copies[f] in pctx:
735 735 self.dirstate.copy(copies[f], f)
736 736 if p2 == nullid:
737 737 for f, s in sorted(self.dirstate.copies().items()):
738 738 if f not in pctx and s not in pctx:
739 739 self.dirstate.copy(None, f)
740 740
741 741 def filectx(self, path, changeid=None, fileid=None):
742 742 """changeid can be a changeset revision, node, or tag.
743 743 fileid can be a file revision or node."""
744 744 return context.filectx(self, path, changeid, fileid)
745 745
746 746 def getcwd(self):
747 747 return self.dirstate.getcwd()
748 748
749 749 def pathto(self, f, cwd=None):
750 750 return self.dirstate.pathto(f, cwd)
751 751
752 752 def wfile(self, f, mode='r'):
753 753 return self.wopener(f, mode)
754 754
755 755 def _link(self, f):
756 756 return self.wvfs.islink(f)
757 757
758 758 def _loadfilter(self, filter):
759 759 if filter not in self.filterpats:
760 760 l = []
761 761 for pat, cmd in self.ui.configitems(filter):
762 762 if cmd == '!':
763 763 continue
764 764 mf = matchmod.match(self.root, '', [pat])
765 765 fn = None
766 766 params = cmd
767 767 for name, filterfn in self._datafilters.iteritems():
768 768 if cmd.startswith(name):
769 769 fn = filterfn
770 770 params = cmd[len(name):].lstrip()
771 771 break
772 772 if not fn:
773 773 fn = lambda s, c, **kwargs: util.filter(s, c)
774 774 # Wrap old filters not supporting keyword arguments
775 775 if not inspect.getargspec(fn)[2]:
776 776 oldfn = fn
777 777 fn = lambda s, c, **kwargs: oldfn(s, c)
778 778 l.append((mf, fn, params))
779 779 self.filterpats[filter] = l
780 780 return self.filterpats[filter]
781 781
782 782 def _filter(self, filterpats, filename, data):
783 783 for mf, fn, cmd in filterpats:
784 784 if mf(filename):
785 785 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
786 786 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
787 787 break
788 788
789 789 return data
790 790
791 791 @unfilteredpropertycache
792 792 def _encodefilterpats(self):
793 793 return self._loadfilter('encode')
794 794
795 795 @unfilteredpropertycache
796 796 def _decodefilterpats(self):
797 797 return self._loadfilter('decode')
798 798
799 799 def adddatafilter(self, name, filter):
800 800 self._datafilters[name] = filter
801 801
802 802 def wread(self, filename):
803 803 if self._link(filename):
804 804 data = self.wvfs.readlink(filename)
805 805 else:
806 806 data = self.wopener.read(filename)
807 807 return self._filter(self._encodefilterpats, filename, data)
808 808
809 809 def wwrite(self, filename, data, flags):
810 810 data = self._filter(self._decodefilterpats, filename, data)
811 811 if 'l' in flags:
812 812 self.wopener.symlink(data, filename)
813 813 else:
814 814 self.wopener.write(filename, data)
815 815 if 'x' in flags:
816 816 self.wvfs.setflags(filename, False, True)
817 817
818 818 def wwritedata(self, filename, data):
819 819 return self._filter(self._decodefilterpats, filename, data)
820 820
821 821 def transaction(self, desc, report=None):
822 822 tr = self._transref and self._transref() or None
823 823 if tr and tr.running():
824 824 return tr.nest()
825 825
826 826 # abort here if the journal already exists
827 827 if self.svfs.exists("journal"):
828 828 raise error.RepoError(
829 829 _("abandoned transaction found - run hg recover"))
830 830
831 831 self._writejournal(desc)
832 832 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
833 833 rp = report and report or self.ui.warn
834 834 tr = transaction.transaction(rp, self.sopener,
835 835 self.sjoin("journal"),
836 836 aftertrans(renames),
837 837 self.store.createmode)
838 838 self._transref = weakref.ref(tr)
839 839 return tr
840 840
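# --- Editor's illustrative sketch (not part of this module) ---
# Typical use of transaction(): open it under the store lock, close() on
# success, and always release() so an unclosed transaction is rolled back.
def _example_transaction(repo):
    lock = repo.lock()
    try:
        tr = repo.transaction('example')  # hypothetical description
        try:
            # ... append to revlogs through tr ...
            tr.close()     # commit the journal
        finally:
            tr.release()   # no-op if closed, rollback otherwise
    finally:
        lock.release()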
841 841 def _journalfiles(self):
842 842 return ((self.svfs, 'journal'),
843 843 (self.vfs, 'journal.dirstate'),
844 844 (self.vfs, 'journal.branch'),
845 845 (self.vfs, 'journal.desc'),
846 846 (self.vfs, 'journal.bookmarks'),
847 847 (self.svfs, 'journal.phaseroots'))
848 848
849 849 def undofiles(self):
850 850 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
851 851
852 852 def _writejournal(self, desc):
853 853 self.opener.write("journal.dirstate",
854 854 self.opener.tryread("dirstate"))
855 855 self.opener.write("journal.branch",
856 856 encoding.fromlocal(self.dirstate.branch()))
857 857 self.opener.write("journal.desc",
858 858 "%d\n%s\n" % (len(self), desc))
859 859 self.opener.write("journal.bookmarks",
860 860 self.opener.tryread("bookmarks"))
861 861 self.sopener.write("journal.phaseroots",
862 862 self.sopener.tryread("phaseroots"))
863 863
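# --- Editor's illustrative sketch (not part of this module) ---
# journal.desc (renamed to undo.desc once the transaction completes)
# stores the changelog length and a description, as written above;
# _rollback() below reads it back roughly like this:
def _example_parse_undo_desc(repo):
    args = repo.opener.read('undo.desc').splitlines()
    oldlen, desc = int(args[0]), args[1]   # e.g. 42, 'commit'
    return oldlen, desc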
864 864 def recover(self):
865 865 lock = self.lock()
866 866 try:
867 867 if self.svfs.exists("journal"):
868 868 self.ui.status(_("rolling back interrupted transaction\n"))
869 869 transaction.rollback(self.sopener, self.sjoin("journal"),
870 870 self.ui.warn)
871 871 self.invalidate()
872 872 return True
873 873 else:
874 874 self.ui.warn(_("no interrupted transaction available\n"))
875 875 return False
876 876 finally:
877 877 lock.release()
878 878
879 879 def rollback(self, dryrun=False, force=False):
880 880 wlock = lock = None
881 881 try:
882 882 wlock = self.wlock()
883 883 lock = self.lock()
884 884 if self.svfs.exists("undo"):
885 885 return self._rollback(dryrun, force)
886 886 else:
887 887 self.ui.warn(_("no rollback information available\n"))
888 888 return 1
889 889 finally:
890 890 release(lock, wlock)
891 891
892 892 @unfilteredmethod # Until we get smarter cache management
893 893 def _rollback(self, dryrun, force):
894 894 ui = self.ui
895 895 try:
896 896 args = self.opener.read('undo.desc').splitlines()
897 897 (oldlen, desc, detail) = (int(args[0]), args[1], None)
898 898 if len(args) >= 3:
899 899 detail = args[2]
900 900 oldtip = oldlen - 1
901 901
902 902 if detail and ui.verbose:
903 903 msg = (_('repository tip rolled back to revision %s'
904 904 ' (undo %s: %s)\n')
905 905 % (oldtip, desc, detail))
906 906 else:
907 907 msg = (_('repository tip rolled back to revision %s'
908 908 ' (undo %s)\n')
909 909 % (oldtip, desc))
910 910 except IOError:
911 911 msg = _('rolling back unknown transaction\n')
912 912 desc = None
913 913
914 914 if not force and self['.'] != self['tip'] and desc == 'commit':
915 915 raise util.Abort(
916 916 _('rollback of last commit while not checked out '
917 917 'may lose data'), hint=_('use -f to force'))
918 918
919 919 ui.status(msg)
920 920 if dryrun:
921 921 return 0
922 922
923 923 parents = self.dirstate.parents()
924 924 self.destroying()
925 925 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
926 926 if self.vfs.exists('undo.bookmarks'):
927 927 self.vfs.rename('undo.bookmarks', 'bookmarks')
928 928 if self.svfs.exists('undo.phaseroots'):
929 929 self.svfs.rename('undo.phaseroots', 'phaseroots')
930 930 self.invalidate()
931 931
932 932 parentgone = (parents[0] not in self.changelog.nodemap or
933 933 parents[1] not in self.changelog.nodemap)
934 934 if parentgone:
935 935 self.vfs.rename('undo.dirstate', 'dirstate')
936 936 try:
937 937 branch = self.opener.read('undo.branch')
938 938 self.dirstate.setbranch(encoding.tolocal(branch))
939 939 except IOError:
940 940 ui.warn(_('named branch could not be reset: '
941 941 'current branch is still \'%s\'\n')
942 942 % self.dirstate.branch())
943 943
944 944 self.dirstate.invalidate()
945 945 parents = tuple([p.rev() for p in self.parents()])
946 946 if len(parents) > 1:
947 947 ui.status(_('working directory now based on '
948 948 'revisions %d and %d\n') % parents)
949 949 else:
950 950 ui.status(_('working directory now based on '
951 951 'revision %d\n') % parents)
952 952 # TODO: if we know which new heads may result from this rollback, pass
953 953 # them to destroy(), which will prevent the branchhead cache from being
954 954 # invalidated.
955 955 self.destroyed()
956 956 return 0
957 957
958 958 def invalidatecaches(self):
959 959
960 960 if '_tagscache' in vars(self):
961 961 # can't use delattr on proxy
962 962 del self.__dict__['_tagscache']
963 963
964 964 self.unfiltered()._branchcaches.clear()
965 965 self.invalidatevolatilesets()
966 966
967 967 def invalidatevolatilesets(self):
968 968 self.filteredrevcache.clear()
969 969 obsolete.clearobscaches(self)
970 970
971 971 def invalidatedirstate(self):
972 972 '''Invalidates the dirstate, causing the next call to dirstate
973 973 to check if it was modified since the last time it was read,
974 974 rereading it if it has.
975 975
976 976 This differs from dirstate.invalidate() in that it doesn't always
977 977 reread the dirstate. Use dirstate.invalidate() if you want to
978 978 explicitly read the dirstate again (i.e. restore it to a previously
979 979 known good state).'''
980 980 if hasunfilteredcache(self, 'dirstate'):
981 981 for k in self.dirstate._filecache:
982 982 try:
983 983 delattr(self.dirstate, k)
984 984 except AttributeError:
985 985 pass
986 986 delattr(self.unfiltered(), 'dirstate')
987 987
988 988 def invalidate(self):
989 989 unfiltered = self.unfiltered() # all file caches are stored unfiltered
990 990 for k in self._filecache:
991 991 # dirstate is invalidated separately in invalidatedirstate()
992 992 if k == 'dirstate':
993 993 continue
994 994
995 995 try:
996 996 delattr(unfiltered, k)
997 997 except AttributeError:
998 998 pass
999 999 self.invalidatecaches()
1000 1000
1001 1001 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1002 1002 try:
1003 1003 l = lock.lock(lockname, 0, releasefn, desc=desc)
1004 1004 except error.LockHeld, inst:
1005 1005 if not wait:
1006 1006 raise
1007 1007 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1008 1008 (desc, inst.locker))
1009 1009 # default to 600 seconds timeout
1010 1010 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1011 1011 releasefn, desc=desc)
1012 1012 if acquirefn:
1013 1013 acquirefn()
1014 1014 return l
1015 1015
1016 1016 def _afterlock(self, callback):
1017 1017 """add a callback to the current repository lock.
1018 1018
1019 1019 The callback will be executed on lock release."""
1020 1020 l = self._lockref and self._lockref()
1021 1021 if l:
1022 1022 l.postrelease.append(callback)
1023 1023 else:
1024 1024 callback()
1025 1025
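# --- Editor's illustrative sketch (not part of this module) ---
# _afterlock() defers work until the current lock is released; commit()
# below uses it to fire the 'commit' hook. A hypothetical caller:
def _example_afterlock(repo):
    def notify():
        repo.ui.status('store lock released\n')
    repo._afterlock(notify)   # runs immediately if no lock is held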
1026 1026 def lock(self, wait=True):
1027 1027 '''Lock the repository store (.hg/store) and return a weak reference
1028 1028 to the lock. Use this before modifying the store (e.g. committing or
1029 1029 stripping). If you are opening a transaction, get a lock as well.'''
1030 1030 l = self._lockref and self._lockref()
1031 1031 if l is not None and l.held:
1032 1032 l.lock()
1033 1033 return l
1034 1034
1035 1035 def unlock():
1036 1036 self.store.write()
1037 1037 if hasunfilteredcache(self, '_phasecache'):
1038 1038 self._phasecache.write()
1039 1039 for k, ce in self._filecache.items():
1040 1040 if k == 'dirstate' or k not in self.__dict__:
1041 1041 continue
1042 1042 ce.refresh()
1043 1043
1044 1044 l = self._lock(self.sjoin("lock"), wait, unlock,
1045 1045 self.invalidate, _('repository %s') % self.origroot)
1046 1046 self._lockref = weakref.ref(l)
1047 1047 return l
1048 1048
1049 1049 def wlock(self, wait=True):
1050 1050 '''Lock the non-store parts of the repository (everything under
1051 1051 .hg except .hg/store) and return a weak reference to the lock.
1052 1052 Use this before modifying files in .hg.'''
1053 1053 l = self._wlockref and self._wlockref()
1054 1054 if l is not None and l.held:
1055 1055 l.lock()
1056 1056 return l
1057 1057
1058 1058 def unlock():
1059 1059 self.dirstate.write()
1060 1060 self._filecache['dirstate'].refresh()
1061 1061
1062 1062 l = self._lock(self.join("wlock"), wait, unlock,
1063 1063 self.invalidatedirstate, _('working directory of %s') %
1064 1064 self.origroot)
1065 1065 self._wlockref = weakref.ref(l)
1066 1066 return l
1067 1067
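# --- Editor's illustrative sketch (not part of this module) ---
# Conventional ordering when both locks are needed: take wlock() before
# lock(), as commit() below does, so independent callers cannot deadlock.
def _example_both_locks(repo):
    wlock = repo.wlock()
    try:
        lock = repo.lock()
        try:
            pass  # ... modify working directory and store ...
        finally:
            lock.release()
    finally:
        wlock.release()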
1068 1068 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1069 1069 """
1070 1070 commit an individual file as part of a larger transaction
1071 1071 """
1072 1072
1073 1073 fname = fctx.path()
1074 1074 text = fctx.data()
1075 1075 flog = self.file(fname)
1076 1076 fparent1 = manifest1.get(fname, nullid)
1077 1077 fparent2 = fparent2o = manifest2.get(fname, nullid)
1078 1078
1079 1079 meta = {}
1080 1080 copy = fctx.renamed()
1081 1081 if copy and copy[0] != fname:
1082 1082 # Mark the new revision of this file as a copy of another
1083 1083 # file. This copy data will effectively act as a parent
1084 1084 # of this new revision. If this is a merge, the first
1085 1085 # parent will be the nullid (meaning "look up the copy data")
1086 1086 # and the second one will be the other parent. For example:
1087 1087 #
1088 1088 # 0 --- 1 --- 3 rev1 changes file foo
1089 1089 # \ / rev2 renames foo to bar and changes it
1090 1090 # \- 2 -/ rev3 should have bar with all changes and
1091 1091 # should record that bar descends from
1092 1092 # bar in rev2 and foo in rev1
1093 1093 #
1094 1094 # this allows this merge to succeed:
1095 1095 #
1096 1096 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1097 1097 # \ / merging rev3 and rev4 should use bar@rev2
1098 1098 # \- 2 --- 4 as the merge base
1099 1099 #
1100 1100
1101 1101 cfname = copy[0]
1102 1102 crev = manifest1.get(cfname)
1103 1103 newfparent = fparent2
1104 1104
1105 1105 if manifest2: # branch merge
1106 1106 if fparent2 == nullid or crev is None: # copied on remote side
1107 1107 if cfname in manifest2:
1108 1108 crev = manifest2[cfname]
1109 1109 newfparent = fparent1
1110 1110
1111 1111 # find source in nearest ancestor if we've lost track
1112 1112 if not crev:
1113 1113 self.ui.debug(" %s: searching for copy revision for %s\n" %
1114 1114 (fname, cfname))
1115 1115 for ancestor in self[None].ancestors():
1116 1116 if cfname in ancestor:
1117 1117 crev = ancestor[cfname].filenode()
1118 1118 break
1119 1119
1120 1120 if crev:
1121 1121 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1122 1122 meta["copy"] = cfname
1123 1123 meta["copyrev"] = hex(crev)
1124 1124 fparent1, fparent2 = nullid, newfparent
1125 1125 else:
1126 1126 self.ui.warn(_("warning: can't find ancestor for '%s' "
1127 1127 "copied from '%s'!\n") % (fname, cfname))
1128 1128
1129 1129 elif fparent2 != nullid:
1130 1130 # is one parent an ancestor of the other?
1131 1131 fparentancestor = flog.ancestor(fparent1, fparent2)
1132 1132 if fparentancestor == fparent1:
1133 1133 fparent1, fparent2 = fparent2, nullid
1134 1134 elif fparentancestor == fparent2:
1135 1135 fparent2 = nullid
1136 1136
1137 1137 # is the file changed?
1138 1138 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1139 1139 changelist.append(fname)
1140 1140 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1141 1141
1142 1142 # are just the flags changed during merge?
1143 1143 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1144 1144 changelist.append(fname)
1145 1145
1146 1146 return fparent1
1147 1147
1148 1148 @unfilteredmethod
1149 1149 def commit(self, text="", user=None, date=None, match=None, force=False,
1150 1150 editor=False, extra={}):
1151 1151 """Add a new revision to current repository.
1152 1152
1153 1153 Revision information is gathered from the working directory,
1154 1154 match can be used to filter the committed files. If editor is
1155 1155 supplied, it is called to get a commit message.
1156 1156 """
1157 1157
1158 1158 def fail(f, msg):
1159 1159 raise util.Abort('%s: %s' % (f, msg))
1160 1160
1161 1161 if not match:
1162 1162 match = matchmod.always(self.root, '')
1163 1163
1164 1164 if not force:
1165 1165 vdirs = []
1166 1166 match.explicitdir = vdirs.append
1167 1167 match.bad = fail
1168 1168
1169 1169 wlock = self.wlock()
1170 1170 try:
1171 1171 wctx = self[None]
1172 1172 merge = len(wctx.parents()) > 1
1173 1173
1174 1174 if (not force and merge and match and
1175 1175 (match.files() or match.anypats())):
1176 1176 raise util.Abort(_('cannot partially commit a merge '
1177 1177 '(do not specify files or patterns)'))
1178 1178
1179 1179 changes = self.status(match=match, clean=force)
1180 1180 if force:
1181 1181 changes[0].extend(changes[6]) # mq may commit unchanged files
1182 1182
1183 1183 # check subrepos
1184 1184 subs = []
1185 1185 commitsubs = set()
1186 1186 newstate = wctx.substate.copy()
1187 1187 # only manage subrepos and .hgsubstate if .hgsub is present
1188 1188 if '.hgsub' in wctx:
1189 1189 # we'll decide whether to track this ourselves, thanks
1190 1190 if '.hgsubstate' in changes[0]:
1191 1191 changes[0].remove('.hgsubstate')
1192 1192 if '.hgsubstate' in changes[2]:
1193 1193 changes[2].remove('.hgsubstate')
1194 1194
1195 1195 # compare current state to last committed state
1196 1196 # build new substate based on last committed state
1197 1197 oldstate = wctx.p1().substate
1198 1198 for s in sorted(newstate.keys()):
1199 1199 if not match(s):
1200 1200 # ignore working copy, use old state if present
1201 1201 if s in oldstate:
1202 1202 newstate[s] = oldstate[s]
1203 1203 continue
1204 1204 if not force:
1205 1205 raise util.Abort(
1206 1206 _("commit with new subrepo %s excluded") % s)
1207 1207 if wctx.sub(s).dirty(True):
1208 1208 if not self.ui.configbool('ui', 'commitsubrepos'):
1209 1209 raise util.Abort(
1210 1210 _("uncommitted changes in subrepo %s") % s,
1211 1211 hint=_("use --subrepos for recursive commit"))
1212 1212 subs.append(s)
1213 1213 commitsubs.add(s)
1214 1214 else:
1215 1215 bs = wctx.sub(s).basestate()
1216 1216 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1217 1217 if oldstate.get(s, (None, None, None))[1] != bs:
1218 1218 subs.append(s)
1219 1219
1220 1220 # check for removed subrepos
1221 1221 for p in wctx.parents():
1222 1222 r = [s for s in p.substate if s not in newstate]
1223 1223 subs += [s for s in r if match(s)]
1224 1224 if subs:
1225 1225 if (not match('.hgsub') and
1226 1226 '.hgsub' in (wctx.modified() + wctx.added())):
1227 1227 raise util.Abort(
1228 1228 _("can't commit subrepos without .hgsub"))
1229 1229 changes[0].insert(0, '.hgsubstate')
1230 1230
1231 1231 elif '.hgsub' in changes[2]:
1232 1232 # clean up .hgsubstate when .hgsub is removed
1233 1233 if ('.hgsubstate' in wctx and
1234 1234 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1235 1235 changes[2].insert(0, '.hgsubstate')
1236 1236
1237 1237 # make sure all explicit patterns are matched
1238 1238 if not force and match.files():
1239 1239 matched = set(changes[0] + changes[1] + changes[2])
1240 1240
1241 1241 for f in match.files():
1242 1242 f = self.dirstate.normalize(f)
1243 1243 if f == '.' or f in matched or f in wctx.substate:
1244 1244 continue
1245 1245 if f in changes[3]: # missing
1246 1246 fail(f, _('file not found!'))
1247 1247 if f in vdirs: # visited directory
1248 1248 d = f + '/'
1249 1249 for mf in matched:
1250 1250 if mf.startswith(d):
1251 1251 break
1252 1252 else:
1253 1253 fail(f, _("no match under directory!"))
1254 1254 elif f not in self.dirstate:
1255 1255 fail(f, _("file not tracked!"))
1256 1256
1257 1257 cctx = context.workingctx(self, text, user, date, extra, changes)
1258 1258
1259 1259 if (not force and not extra.get("close") and not merge
1260 1260 and not cctx.files()
1261 1261 and wctx.branch() == wctx.p1().branch()):
1262 1262 return None
1263 1263
1264 1264 if merge and cctx.deleted():
1265 1265 raise util.Abort(_("cannot commit merge with missing files"))
1266 1266
1267 1267 ms = mergemod.mergestate(self)
1268 1268 for f in changes[0]:
1269 1269 if f in ms and ms[f] == 'u':
1270 1270 raise util.Abort(_("unresolved merge conflicts "
1271 1271 "(see hg help resolve)"))
1272 1272
1273 1273 if editor:
1274 1274 cctx._text = editor(self, cctx, subs)
1275 1275 edited = (text != cctx._text)
1276 1276
1277 1277 # commit subs and write new state
1278 1278 if subs:
1279 1279 for s in sorted(commitsubs):
1280 1280 sub = wctx.sub(s)
1281 1281 self.ui.status(_('committing subrepository %s\n') %
1282 1282 subrepo.subrelpath(sub))
1283 1283 sr = sub.commit(cctx._text, user, date)
1284 1284 newstate[s] = (newstate[s][0], sr)
1285 1285 subrepo.writestate(self, newstate)
1286 1286
1287 1287 # Save commit message in case this transaction gets rolled back
1288 1288 # (e.g. by a pretxncommit hook). Leave the content alone on
1289 1289 # the assumption that the user will use the same editor again.
1290 1290 msgfn = self.savecommitmessage(cctx._text)
1291 1291
1292 1292 p1, p2 = self.dirstate.parents()
1293 1293 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1294 1294 try:
1295 1295 self.hook("precommit", throw=True, parent1=hookp1,
1296 1296 parent2=hookp2)
1297 1297 ret = self.commitctx(cctx, True)
1298 1298 except: # re-raises
1299 1299 if edited:
1300 1300 self.ui.write(
1301 1301 _('note: commit message saved in %s\n') % msgfn)
1302 1302 raise
1303 1303
1304 1304 # update bookmarks, dirstate and mergestate
1305 1305 bookmarks.update(self, [p1, p2], ret)
1306 1306 cctx.markcommitted(ret)
1307 1307 ms.reset()
1308 1308 finally:
1309 1309 wlock.release()
1310 1310
1311 1311 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1312 1312 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1313 1313 self._afterlock(commithook)
1314 1314 return ret
1315 1315
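# --- Editor's illustrative sketch (not part of this module) ---
# A hypothetical narrow commit through the API above: restrict the commit
# to one file with a matcher; a None result means nothing changed.
def _example_commit(repo):
    m = matchmod.match(repo.root, '', ['README'])
    node = repo.commit(text='update README',
                       user='alice <alice@example.com>', match=m)
    if node is None:
        repo.ui.status('nothing changed\n')
    return node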
1316 1316 @unfilteredmethod
1317 1317 def commitctx(self, ctx, error=False):
1318 1318 """Add a new revision to current repository.
1319 1319 Revision information is passed via the context argument.
1320 1320 """
1321 1321
1322 1322 tr = lock = None
1323 1323 removed = list(ctx.removed())
1324 1324 p1, p2 = ctx.p1(), ctx.p2()
1325 1325 user = ctx.user()
1326 1326
1327 1327 lock = self.lock()
1328 1328 try:
1329 1329 tr = self.transaction("commit")
1330 1330 trp = weakref.proxy(tr)
1331 1331
1332 1332 if ctx.files():
1333 1333 m1 = p1.manifest().copy()
1334 1334 m2 = p2.manifest()
1335 1335
1336 1336 # check in files
1337 1337 new = {}
1338 1338 changed = []
1339 1339 linkrev = len(self)
1340 1340 for f in sorted(ctx.modified() + ctx.added()):
1341 1341 self.ui.note(f + "\n")
1342 1342 try:
1343 1343 fctx = ctx[f]
1344 1344 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1345 1345 changed)
1346 1346 m1.set(f, fctx.flags())
1347 1347 except OSError, inst:
1348 1348 self.ui.warn(_("trouble committing %s!\n") % f)
1349 1349 raise
1350 1350 except IOError, inst:
1351 1351 errcode = getattr(inst, 'errno', errno.ENOENT)
1352 1352 if error or errcode and errcode != errno.ENOENT:
1353 1353 self.ui.warn(_("trouble committing %s!\n") % f)
1354 1354 raise
1355 1355 else:
1356 1356 removed.append(f)
1357 1357
1358 1358 # update manifest
1359 1359 m1.update(new)
1360 1360 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1361 1361 drop = [f for f in removed if f in m1]
1362 1362 for f in drop:
1363 1363 del m1[f]
1364 1364 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1365 1365 p2.manifestnode(), (new, drop))
1366 1366 files = changed + removed
1367 1367 else:
1368 1368 mn = p1.manifestnode()
1369 1369 files = []
1370 1370
1371 1371 # update changelog
1372 1372 self.changelog.delayupdate()
1373 1373 n = self.changelog.add(mn, files, ctx.description(),
1374 1374 trp, p1.node(), p2.node(),
1375 1375 user, ctx.date(), ctx.extra().copy())
1376 1376 p = lambda: self.changelog.writepending() and self.root or ""
1377 1377 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1378 1378 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1379 1379 parent2=xp2, pending=p)
1380 1380 self.changelog.finalize(trp)
1381 1381 # set the new commit in its proper phase
1382 1382 targetphase = phases.newcommitphase(self.ui)
1383 1383 if targetphase:
1384 1384 # retracting the boundary does not alter parent changesets.
1385 1385 # if a parent has a higher phase, the resulting phase will
1386 1386 # be compliant anyway
1387 1387 #
1388 1388 # if minimal phase was 0 we don't need to retract anything
1389 1389 phases.retractboundary(self, targetphase, [n])
1390 1390 tr.close()
1391 1391 branchmap.updatecache(self.filtered('served'))
1392 1392 return n
1393 1393 finally:
1394 1394 if tr:
1395 1395 tr.release()
1396 1396 lock.release()
1397 1397
1398 1398 @unfilteredmethod
1399 1399 def destroying(self):
1400 1400 '''Inform the repository that nodes are about to be destroyed.
1401 1401 Intended for use by strip and rollback, so there's a common
1402 1402 place for anything that has to be done before destroying history.
1403 1403
1404 1404 This is mostly useful for saving state that is in memory and waiting
1405 1405 to be flushed when the current lock is released. Because a call to
1406 1406 destroyed is imminent, the repo will be invalidated causing those
1407 1407 changes to stay in memory (waiting for the next unlock), or vanish
1408 1408 completely.
1409 1409 '''
1410 1410 # When using the same lock to commit and strip, the phasecache is left
1411 1411 # dirty after committing. Then when we strip, the repo is invalidated,
1412 1412 # causing those changes to disappear.
1413 1413 if '_phasecache' in vars(self):
1414 1414 self._phasecache.write()
1415 1415
1416 1416 @unfilteredmethod
1417 1417 def destroyed(self):
1418 1418 '''Inform the repository that nodes have been destroyed.
1419 1419 Intended for use by strip and rollback, so there's a common
1420 1420 place for anything that has to be done after destroying history.
1421 1421 '''
1422 1422 # When one tries to:
1423 1423 # 1) destroy nodes thus calling this method (e.g. strip)
1424 1424 # 2) use phasecache somewhere (e.g. commit)
1425 1425 #
1426 1426 # then 2) will fail because the phasecache contains nodes that were
1427 1427 # removed. We can either remove phasecache from the filecache,
1428 1428 # causing it to reload next time it is accessed, or simply filter
1429 1429 # the removed nodes now and write the updated cache.
1430 1430 self._phasecache.filterunknown(self)
1431 1431 self._phasecache.write()
1432 1432
1433 1433 # update the 'served' branch cache to help read-only server processes
1434 1434 # Thanks to branchcache collaboration this is done from the nearest
1435 1435 # filtered subset and it is expected to be fast.
1436 1436 branchmap.updatecache(self.filtered('served'))
1437 1437
1438 1438 # Ensure the persistent tag cache is updated. Doing it now
1439 1439 # means that the tag cache only has to worry about destroyed
1440 1440 # heads immediately after a strip/rollback. That in turn
1441 1441 # guarantees that "cachetip == currenttip" (comparing both rev
1442 1442 # and node) always means no nodes have been added or destroyed.
1443 1443
1444 1444 # XXX this is suboptimal when qrefresh'ing: we strip the current
1445 1445 # head, refresh the tag cache, then immediately add a new head.
1446 1446 # But I think doing it this way is necessary for the "instant
1447 1447 # tag cache retrieval" case to work.
1448 1448 self.invalidate()
1449 1449
1450 1450 def walk(self, match, node=None):
1451 1451 '''
1452 1452 walk recursively through the directory tree or a given
1453 1453 changeset, finding all files matched by the match
1454 1454 function
1455 1455 '''
1456 1456 return self[node].walk(match)
1457 1457
1458 1458 def status(self, node1='.', node2=None, match=None,
1459 1459 ignored=False, clean=False, unknown=False,
1460 1460 listsubrepos=False):
1461 1461 """return status of files between two nodes or node and working
1462 1462 directory.
1463 1463
1464 1464 If node1 is None, use the first dirstate parent instead.
1465 1465 If node2 is None, compare node1 with working directory.
1466 1466 """
1467 1467
1468 1468 def mfmatches(ctx):
1469 1469 mf = ctx.manifest().copy()
1470 1470 if match.always():
1471 1471 return mf
1472 1472 for fn in mf.keys():
1473 1473 if not match(fn):
1474 1474 del mf[fn]
1475 1475 return mf
1476 1476
1477 1477 ctx1 = self[node1]
1478 1478 ctx2 = self[node2]
1479 1479
1480 1480 working = ctx2.rev() is None
1481 1481 parentworking = working and ctx1 == self['.']
1482 1482 match = match or matchmod.always(self.root, self.getcwd())
1483 1483 listignored, listclean, listunknown = ignored, clean, unknown
1484 1484
1485 1485 # load earliest manifest first for caching reasons
1486 1486 if not working and ctx2.rev() < ctx1.rev():
1487 1487 ctx2.manifest()
1488 1488
1489 1489 if not parentworking:
1490 1490 def bad(f, msg):
1491 1491 # 'f' may be a directory pattern from 'match.files()',
1492 1492 # so 'f not in ctx1' is not enough
1493 1493 if f not in ctx1 and f not in ctx1.dirs():
1494 1494 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1495 1495 match.bad = bad
1496 1496
1497 1497 if working: # we need to scan the working dir
1498 1498 subrepos = []
1499 1499 if '.hgsub' in self.dirstate:
1500 1500 subrepos = sorted(ctx2.substate)
1501 1501 s = self.dirstate.status(match, subrepos, listignored,
1502 1502 listclean, listunknown)
1503 1503 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1504 1504
1505 1505 # check for any possibly clean files
1506 1506 if parentworking and cmp:
1507 1507 fixup = []
1508 1508 # do a full compare of any files that might have changed
1509 1509 for f in sorted(cmp):
1510 1510 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1511 1511 or ctx1[f].cmp(ctx2[f])):
1512 1512 modified.append(f)
1513 1513 else:
1514 1514 fixup.append(f)
1515 1515
1516 1516 # update dirstate for files that are actually clean
1517 1517 if fixup:
1518 1518 if listclean:
1519 1519 clean += fixup
1520 1520
1521 1521 try:
1522 1522 # updating the dirstate is optional
1523 1523 # so we don't wait on the lock
1524 1524 wlock = self.wlock(False)
1525 1525 try:
1526 1526 for f in fixup:
1527 1527 self.dirstate.normal(f)
1528 1528 finally:
1529 1529 wlock.release()
1530 1530 except error.LockError:
1531 1531 pass
1532 1532
1533 1533 if not parentworking:
1534 1534 mf1 = mfmatches(ctx1)
1535 1535 if working:
1536 1536 # we are comparing working dir against non-parent
1537 1537 # generate a pseudo-manifest for the working dir
1538 1538 mf2 = mfmatches(self['.'])
1539 1539 for f in cmp + modified + added:
1540 1540 mf2[f] = None
1541 1541 mf2.set(f, ctx2.flags(f))
1542 1542 for f in removed:
1543 1543 if f in mf2:
1544 1544 del mf2[f]
1545 1545 else:
1546 1546 # we are comparing two revisions
1547 1547 deleted, unknown, ignored = [], [], []
1548 1548 mf2 = mfmatches(ctx2)
1549 1549
1550 1550 modified, added, clean = [], [], []
1551 1551 withflags = mf1.withflags() | mf2.withflags()
1552 1552 for fn, mf2node in mf2.iteritems():
1553 1553 if fn in mf1:
1554 1554 if (fn not in deleted and
1555 1555 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1556 1556 (mf1[fn] != mf2node and
1557 1557 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1558 1558 modified.append(fn)
1559 1559 elif listclean:
1560 1560 clean.append(fn)
1561 1561 del mf1[fn]
1562 1562 elif fn not in deleted:
1563 1563 added.append(fn)
1564 1564 removed = mf1.keys()
1565 1565
1566 1566 if working and modified and not self.dirstate._checklink:
1567 1567 # Symlink placeholders may get non-symlink-like contents
1568 1568 # via user error or dereferencing by NFS or Samba servers,
1569 1569 # so we filter out any placeholders that don't look like a
1570 1570 # symlink
1571 1571 sane = []
1572 1572 for f in modified:
1573 1573 if ctx2.flags(f) == 'l':
1574 1574 d = ctx2[f].data()
1575 1575 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1576 1576 self.ui.debug('ignoring suspect symlink placeholder'
1577 1577 ' "%s"\n' % f)
1578 1578 continue
1579 1579 sane.append(f)
1580 1580 modified = sane
1581 1581
1582 1582 r = modified, added, removed, deleted, unknown, ignored, clean
1583 1583
1584 1584 if listsubrepos:
1585 1585 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1586 1586 if working:
1587 1587 rev2 = None
1588 1588 else:
1589 1589 rev2 = ctx2.substate[subpath][1]
1590 1590 try:
1591 1591 submatch = matchmod.narrowmatcher(subpath, match)
1592 1592 s = sub.status(rev2, match=submatch, ignored=listignored,
1593 1593 clean=listclean, unknown=listunknown,
1594 1594 listsubrepos=True)
1595 1595 for rfiles, sfiles in zip(r, s):
1596 1596 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1597 1597 except error.LookupError:
1598 1598 self.ui.status(_("skipping missing subrepository: %s\n")
1599 1599 % subpath)
1600 1600
1601 1601 for l in r:
1602 1602 l.sort()
1603 1603 return r
1604 1604
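# --- Editor's illustrative sketch (not part of this module) ---
# status() returns seven sorted lists, in this fixed order; listing
# unknown/ignored/clean files must be requested explicitly:
def _example_status(repo):
    (modified, added, removed, deleted,
     unknown, ignored, clean) = repo.status(unknown=True, ignored=True,
                                            clean=True)
    return modified, added, removed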
1605 1605 def heads(self, start=None):
1606 1606 heads = self.changelog.heads(start)
1607 1607 # sort the output in rev descending order
1608 1608 return sorted(heads, key=self.changelog.rev, reverse=True)
1609 1609
1610 1610 def branchheads(self, branch=None, start=None, closed=False):
1611 1611 '''return a (possibly filtered) list of heads for the given branch
1612 1612
1613 1613 Heads are returned in topological order, from newest to oldest.
1614 1614 If branch is None, use the dirstate branch.
1615 1615 If start is not None, return only heads reachable from start.
1616 1616 If closed is True, return heads that are marked as closed as well.
1617 1617 '''
1618 1618 if branch is None:
1619 1619 branch = self[None].branch()
1620 1620 branches = self.branchmap()
1621 1621 if branch not in branches:
1622 1622 return []
1623 1623 # the cache returns heads ordered lowest to highest
1624 1624 bheads = list(reversed(branches[branch]))
1625 1625 if start is not None:
1626 1626 # filter out the heads that cannot be reached from startrev
1627 1627 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1628 1628 bheads = [h for h in bheads if h in fbheads]
1629 1629 if not closed:
1630 1630 bheads = [h for h in bheads if not self[h].closesbranch()]
1631 1631 return bheads
1632 1632
1633 1633 def branches(self, nodes):
1634 1634 if not nodes:
1635 1635 nodes = [self.changelog.tip()]
1636 1636 b = []
1637 1637 for n in nodes:
1638 1638 t = n
1639 1639 while True:
1640 1640 p = self.changelog.parents(n)
1641 1641 if p[1] != nullid or p[0] == nullid:
1642 1642 b.append((t, n, p[0], p[1]))
1643 1643 break
1644 1644 n = p[0]
1645 1645 return b
1646 1646
1647 1647 def between(self, pairs):
1648 1648 r = []
1649 1649
1650 1650 for top, bottom in pairs:
1651 1651 n, l, i = top, [], 0
1652 1652 f = 1
1653 1653
1654 1654 while n != bottom and n != nullid:
1655 1655 p = self.changelog.parents(n)[0]
1656 1656 if i == f:
1657 1657 l.append(n)
1658 1658 f = f * 2
1659 1659 n = p
1660 1660 i += 1
1661 1661
1662 1662 r.append(l)
1663 1663
1664 1664 return r
1665 1665
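# --- Editor's illustrative sketch (not part of this module) ---
# between() samples the ancestor chain of 'top' at exponentially growing
# distances 1, 2, 4, 8, ... toward 'bottom'; the same index pattern:
def _example_between_spacing(length):
    i, f, picks = 0, 1, []
    while i < length:
        if i == f:
            picks.append(i)   # distances 1, 2, 4, 8, ...
            f *= 2
        i += 1
    return picks              # e.g. length=10 -> [1, 2, 4, 8]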
1666 1666 def pull(self, remote, heads=None, force=False):
1667 1667 if remote.local():
1668 1668 missing = set(remote.requirements) - self.supported
1669 1669 if missing:
1670 1670 msg = _("required features are not"
1671 1671 " supported in the destination:"
1672 1672 " %s") % (', '.join(sorted(missing)))
1673 1673 raise util.Abort(msg)
1674 1674
1675 1675 # don't open a transaction for nothing, or you'll break future
1676 1676 # useful rollback calls
1677 1677 tr = None
1678 1678 trname = 'pull\n' + util.hidepassword(remote.url())
1679 1679 lock = self.lock()
1680 1680 try:
1681 1681 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1682 1682 force=force)
1683 1683 common, fetch, rheads = tmp
1684 1684 if not fetch:
1685 1685 self.ui.status(_("no changes found\n"))
1686 1686 added = []
1687 1687 result = 0
1688 1688 else:
1689 1689 tr = self.transaction(trname)
1690 1690 if heads is None and list(common) == [nullid]:
1691 1691 self.ui.status(_("requesting all changes\n"))
1692 1692 elif heads is None and remote.capable('changegroupsubset'):
1693 1693 # issue1320, avoid a race if remote changed after discovery
1694 1694 heads = rheads
1695 1695
1696 1696 if remote.capable('getbundle'):
1697 1697 # TODO: get bundlecaps from remote
1698 1698 cg = remote.getbundle('pull', common=common,
1699 1699 heads=heads or rheads)
1700 1700 elif heads is None:
1701 1701 cg = remote.changegroup(fetch, 'pull')
1702 1702 elif not remote.capable('changegroupsubset'):
1703 1703 raise util.Abort(_("partial pull cannot be done because "
1704 1704 "other repository doesn't support "
1705 1705 "changegroupsubset."))
1706 1706 else:
1707 1707 cg = remote.changegroupsubset(fetch, heads, 'pull')
1708 1708 # we use the unfiltered changelog here because hidden revisions
1709 1709 # must be taken into account for phase synchronization. They may
1710 1710 # become public and visible again.
1711 1711 cl = self.unfiltered().changelog
1712 1712 clstart = len(cl)
1713 1713 result = self.addchangegroup(cg, 'pull', remote.url())
1714 1714 clend = len(cl)
1715 1715 added = [cl.node(r) for r in xrange(clstart, clend)]
1716 1716
1717 1717 # compute target subset
1718 1718 if heads is None:
1719 1719 # We pulled everything possible
1720 1720 # sync on everything common
1721 1721 subset = common + added
1722 1722 else:
1723 1723 # We pulled a specific subset
1724 1724 # sync on this subset
1725 1725 subset = heads
1726 1726
1727 1727 # Get remote phases data from remote
1728 1728 remotephases = remote.listkeys('phases')
1729 1729 publishing = bool(remotephases.get('publishing', False))
1730 1730 if remotephases and not publishing:
1731 1731 # remote is new and non-publishing
1732 1732 pheads, _dr = phases.analyzeremotephases(self, subset,
1733 1733 remotephases)
1734 1734 phases.advanceboundary(self, phases.public, pheads)
1735 1735 phases.advanceboundary(self, phases.draft, subset)
1736 1736 else:
1737 1737 # Remote is old or publishing; all common changesets
1738 1738 # should be seen as public
1739 1739 phases.advanceboundary(self, phases.public, subset)
1740 1740
1741 1741 def gettransaction():
1742 1742 if tr is None:
1743 1743 return self.transaction(trname)
1744 1744 return tr
1745 1745
1746 1746 obstr = obsolete.syncpull(self, remote, gettransaction)
1747 1747 if obstr is not None:
1748 1748 tr = obstr
1749 1749
1750 1750 if tr is not None:
1751 1751 tr.close()
1752 1752 finally:
1753 1753 if tr is not None:
1754 1754 tr.release()
1755 1755 lock.release()
1756 1756
1757 1757 return result
1758 1758
1759 1759 def checkpush(self, force, revs):
1760 1760 """Extensions can override this function if additional checks have
1761 1761 to be performed before pushing, or call it if they override push
1762 1762 command.
1763 1763 """
1764 1764 pass
1765 1765
1766 1766 def push(self, remote, force=False, revs=None, newbranch=False):
1767 1767 '''Push outgoing changesets (limited by revs) from the current
1768 1768 repository to remote. Return an integer:
1769 1769 - None means nothing to push
1770 1770 - 0 means HTTP error
1771 1771 - 1 means we pushed and remote head count is unchanged *or*
1772 1772 we have outgoing changesets but refused to push
1773 1773 - other values as described by addchangegroup()
1774 1774 '''
1775 1775 if remote.local():
1776 1776 missing = set(self.requirements) - remote.local().supported
1777 1777 if missing:
1778 1778 msg = _("required features are not"
1779 1779 " supported in the destination:"
1780 1780 " %s") % (', '.join(sorted(missing)))
1781 1781 raise util.Abort(msg)
1782 1782
1783 1783 # there are two ways to push to a remote repo:
1784 1784 #
1785 1785 # addchangegroup assumes local user can lock remote
1786 1786 # repo (local filesystem, old ssh servers).
1787 1787 #
1788 1788 # unbundle assumes local user cannot lock remote repo (new ssh
1789 1789 # servers, http servers).
1790 1790
1791 1791 if not remote.canpush():
1792 1792 raise util.Abort(_("destination does not support push"))
1793 1793 unfi = self.unfiltered()
1794 1794 def localphasemove(nodes, phase=phases.public):
1795 1795 """move <nodes> to <phase> in the local source repo"""
1796 1796 if locallock is not None:
1797 1797 phases.advanceboundary(self, phase, nodes)
1798 1798 else:
1799 1799 # repo is not locked, do not change any phases!
1800 1800 # Informs the user that phases should have been moved when
1801 1801 # applicable.
1802 1802 actualmoves = [n for n in nodes if phase < self[n].phase()]
1803 1803 phasestr = phases.phasenames[phase]
1804 1804 if actualmoves:
1805 1805 self.ui.status(_('cannot lock source repo, skipping local'
1806 1806 ' %s phase update\n') % phasestr)
1807 1807 # get local lock as we might write phase data
1808 1808 locallock = None
1809 1809 try:
1810 1810 locallock = self.lock()
1811 1811 except IOError, err:
1812 1812 if err.errno != errno.EACCES:
1813 1813 raise
1814 1814 # source repo cannot be locked.
1815 1815 # We do not abort the push, but just disable the local phase
1816 1816 # synchronisation.
1817 1817 msg = 'cannot lock source repository: %s\n' % err
1818 1818 self.ui.debug(msg)
1819 1819 try:
1820 1820 self.checkpush(force, revs)
1821 1821 lock = None
1822 1822 unbundle = remote.capable('unbundle')
1823 1823 if not unbundle:
1824 1824 lock = remote.lock()
1825 1825 try:
1826 1826 # discovery
1827 1827 fci = discovery.findcommonincoming
1828 1828 commoninc = fci(unfi, remote, force=force)
1829 1829 common, inc, remoteheads = commoninc
1830 1830 fco = discovery.findcommonoutgoing
1831 1831 outgoing = fco(unfi, remote, onlyheads=revs,
1832 1832 commoninc=commoninc, force=force)
1833 1833
1834 1834
1835 1835 if not outgoing.missing:
1836 1836 # nothing to push
1837 1837 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1838 1838 ret = None
1839 1839 else:
1840 1840 # something to push
1841 1841 if not force:
1842 1842 # if self.obsstore is empty --> no obsolete changesets,
1843 1843 # so we can save the iteration
1844 1844 if unfi.obsstore:
1845 1845 # these messages are here for the 80-char limit
1846 1846 mso = _("push includes obsolete changeset: %s!")
1847 1847 mst = "push includes %s changeset: %s!"
1848 1848 # plain versions for i18n tool to detect them
1849 1849 _("push includes unstable changeset: %s!")
1850 1850 _("push includes bumped changeset: %s!")
1851 1851 _("push includes divergent changeset: %s!")
1852 1852 # If we are about to push and there is at least one
1853 1853 # obsolete or unstable changeset in missing, then at
1854 1854 # least one of the missing heads will be obsolete or
1855 1855 # unstable. So checking only the heads is enough.
1856 1856 for node in outgoing.missingheads:
1857 1857 ctx = unfi[node]
1858 1858 if ctx.obsolete():
1859 1859 raise util.Abort(mso % ctx)
1860 1860 elif ctx.troubled():
1861 1861 raise util.Abort(_(mst)
1862 1862 % (ctx.troubles()[0],
1863 1863 ctx))
1864 1864 discovery.checkheads(unfi, remote, outgoing,
1865 1865 remoteheads, newbranch,
1866 1866 bool(inc))
1867 1867
1868 1868 # TODO: get bundlecaps from remote
1869 1869 bundlecaps = None
1870 1870 # create a changegroup from local
1871 1871 if revs is None and not outgoing.excluded:
1872 1872 # push everything,
1873 1873 # use the fast path, no race possible on push
1874 1874 bundler = changegroup.bundle10(self, bundlecaps)
1875 1875 cg = self._changegroupsubset(outgoing,
1876 1876 bundler,
1877 1877 'push',
1878 1878 fastpath=True)
1879 1879 else:
1880 1880 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1881 1881
1882 1882 # apply changegroup to remote
1883 1883 if unbundle:
1884 1884 # local repo finds heads on server, finds out what
1885 1885 # revs it must push. once revs transferred, if server
1886 1886 # finds it has different heads (someone else won
1887 1887 # commit/push race), server aborts.
1888 1888 if force:
1889 1889 remoteheads = ['force']
1890 1890 # ssh: return remote's addchangegroup()
1891 1891 # http: return remote's addchangegroup() or 0 for error
1892 1892 ret = remote.unbundle(cg, remoteheads, 'push')
1893 1893 else:
1894 1894 # we return an integer indicating remote head count
1895 1895 # change
1896 1896 ret = remote.addchangegroup(cg, 'push', self.url())
1897 1897
1898 1898 if ret:
1899 1899 # push succeeded, synchronize the target of the push
1900 1900 cheads = outgoing.missingheads
1901 1901 elif revs is None:
1902 1902 # All-out push failed; synchronize all common heads
1903 1903 cheads = outgoing.commonheads
1904 1904 else:
1905 1905 # I want cheads = heads(::missingheads and ::commonheads)
1906 1906 # (missingheads is revs with secret changeset filtered out)
1907 1907 #
1908 1908 # This can be expressed as:
1909 1909 # cheads = ( (missingheads and ::commonheads)
1910 1910 # + (commonheads and ::missingheads))"
1911 1911 # )
1912 1912 #
1913 1913 # while trying to push we already computed the following:
1914 1914 # common = (::commonheads)
1915 1915 # missing = ((commonheads::missingheads) - commonheads)
1916 1916 #
1917 1917 # We can pick:
1918 1918 # * missingheads part of common (::commonheads)
1919 1919 common = set(outgoing.common)
1920 1920 cheads = [node for node in revs if node in common]
1921 1921 # and
1922 1922 # * commonheads parents on missing
1923 1923 revset = unfi.set('%ln and parents(roots(%ln))',
1924 1924 outgoing.commonheads,
1925 1925 outgoing.missing)
1926 1926 cheads.extend(c.node() for c in revset)
1927 1927 # even when we don't push, exchanging phase data is useful
1928 1928 remotephases = remote.listkeys('phases')
1929 1929 if (self.ui.configbool('ui', '_usedassubrepo', False)
1930 1930 and remotephases # server supports phases
1931 1931 and ret is None # nothing was pushed
1932 1932 and remotephases.get('publishing', False)):
1933 1933 # When:
1934 1934 # - this is a subrepo push
1935 1935 # - and the remote supports phases
1936 1936 # - and no changeset was pushed
1937 1937 # - and the remote is publishing
1938 1938 # we may be hitting the issue 3871 case!
1939 1939 # We drop the courtesy phase synchronisation and treat the
1940 1940 # remote as publishing, so that changesets which may still
1941 1941 # be draft locally get published.
1942 1942 remotephases = {'publishing': 'True'}
1943 1943 if not remotephases: # old server or public only repo
1944 1944 localphasemove(cheads)
1945 1945 # don't push any phase data as there is nothing to push
1946 1946 else:
1947 1947 ana = phases.analyzeremotephases(self, cheads, remotephases)
1948 1948 pheads, droots = ana
1949 1949 ### Apply remote phase on local
1950 1950 if remotephases.get('publishing', False):
1951 1951 localphasemove(cheads)
1952 1952 else: # publish = False
1953 1953 localphasemove(pheads)
1954 1954 localphasemove(cheads, phases.draft)
1955 1955 ### Apply local phase on remote
1956 1956
1957 1957 # Get the list of all revs draft on remote but public here.
1958 1958 # XXX Beware that the revset breaks if droots is not strictly
1959 1959 # XXX roots; we may want to ensure it is, but that is costly
1960 1960 outdated = unfi.set('heads((%ln::%ln) and public())',
1961 1961 droots, cheads)
1962 1962 for newremotehead in outdated:
1963 1963 r = remote.pushkey('phases',
1964 1964 newremotehead.hex(),
1965 1965 str(phases.draft),
1966 1966 str(phases.public))
1967 1967 if not r:
1968 1968 self.ui.warn(_('updating %s to public failed!\n')
1969 1969 % newremotehead)
1970 1970 self.ui.debug('try to push obsolete markers to remote\n')
1971 1971 obsolete.syncpush(self, remote)
1972 1972 finally:
1973 1973 if lock is not None:
1974 1974 lock.release()
1975 1975 finally:
1976 1976 if locallock is not None:
1977 1977 locallock.release()
1978 1978
1979 1979 bookmarks.updateremote(self.ui, unfi, remote, revs)
1980 1980 return ret
1981 1981
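# --- Editor's illustrative sketch (not part of this module) ---
# Interpreting push()'s return value, per the docstring above:
def _example_push(repo, remote):
    ret = repo.push(remote)
    if ret is None:
        repo.ui.status('nothing to push\n')
    elif ret == 0:
        repo.ui.status('push failed (HTTP error)\n')
    # other values: pushed; head-count change encoded as described by
    # addchangegroup() below
    return ret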
1982 1982 def changegroupinfo(self, nodes, source):
1983 1983 if self.ui.verbose or source == 'bundle':
1984 1984 self.ui.status(_("%d changesets found\n") % len(nodes))
1985 1985 if self.ui.debugflag:
1986 1986 self.ui.debug("list of changesets:\n")
1987 1987 for node in nodes:
1988 1988 self.ui.debug("%s\n" % hex(node))
1989 1989
1990 1990 def changegroupsubset(self, bases, heads, source):
1991 1991 """Compute a changegroup consisting of all the nodes that are
1992 1992 descendants of any of the bases and ancestors of any of the heads.
1993 1993 Return a chunkbuffer object whose read() method will return
1994 1994 successive changegroup chunks.
1995 1995
1996 1996 It is fairly complex as determining which filenodes and which
1997 1997 manifest nodes need to be included for the changeset to be complete
1998 1998 is non-trivial.
1999 1999
2000 2000 Another wrinkle is doing the reverse, figuring out which changeset in
2001 2001 the changegroup a particular filenode or manifestnode belongs to.
2002 2002 """
2003 2003 cl = self.changelog
2004 2004 if not bases:
2005 2005 bases = [nullid]
2006 2006 # TODO: remove call to nodesbetween.
2007 2007 csets, bases, heads = cl.nodesbetween(bases, heads)
2008 2008 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2009 2009 outgoing = discovery.outgoing(cl, bases, heads)
2010 2010 bundler = changegroup.bundle10(self)
2011 2011 return self._changegroupsubset(outgoing, bundler, source)
2012 2012
2013 2013 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2014 2014 """Like getbundle, but taking a discovery.outgoing as an argument.
2015 2015
2016 2016 This is only implemented for local repos and reuses potentially
2017 2017 precomputed sets in outgoing."""
2018 2018 if not outgoing.missing:
2019 2019 return None
2020 2020 bundler = changegroup.bundle10(self, bundlecaps)
2021 2021 return self._changegroupsubset(outgoing, bundler, source)
2022 2022
2023 2023 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2024 2024 """Like changegroupsubset, but returns the set difference between the
2025 2025 ancestors of heads and the ancestors of common.
2026 2026
2027 2027 If heads is None, use the local heads. If common is None, use [nullid].
2028 2028
2029 2029 The nodes in common might not all be known locally due to the way the
2030 2030 current discovery protocol works.
2031 2031 """
2032 2032 cl = self.changelog
2033 2033 if common:
2034 2034 hasnode = cl.hasnode
2035 2035 common = [n for n in common if hasnode(n)]
2036 2036 else:
2037 2037 common = [nullid]
2038 2038 if not heads:
2039 2039 heads = cl.heads()
2040 2040 return self.getlocalbundle(source,
2041 2041 discovery.outgoing(cl, common, heads),
2042 2042 bundlecaps=bundlecaps)
2043 2043
2044 2044 @unfilteredmethod
2045 2045 def _changegroupsubset(self, outgoing, bundler, source,
2046 2046 fastpath=False):
2047 2047 commonrevs = outgoing.common
2048 2048 csets = outgoing.missing
2049 2049 heads = outgoing.missingheads
2050 2050 # We go through the fast path if we get told to, or if all (unfiltered)
2051 2051 # heads have been requested (since we then know all linkrevs will
2052 2052 # be pulled by the client).
2053 2053 heads.sort()
2054 2054 fastpathlinkrev = fastpath or (
2055 2055 self.filtername is None and heads == sorted(self.heads()))
2056 2056
2057 2057 self.hook('preoutgoing', throw=True, source=source)
2058 2058 self.changegroupinfo(csets, source)
2059 2059 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2060 2060 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2061 2061
2062 2062 def changegroup(self, basenodes, source):
2063 2063 # to avoid a race we use changegroupsubset() (issue1320)
2064 2064 return self.changegroupsubset(basenodes, self.heads(), source)
2065 2065
2066 2066 @unfilteredmethod
2067 2067 def addchangegroup(self, source, srctype, url, emptyok=False):
2068 2068 """Add the changegroup returned by source.read() to this repo.
2069 2069 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2070 2070 the URL of the repo where this changegroup is coming from.
2071 2071
2072 2072 Return an integer summarizing the change to this repo:
2073 2073 - nothing changed or no source: 0
2074 2074 - more heads than before: 1+added heads (2..n)
2075 2075 - fewer heads than before: -1-removed heads (-2..-n)
2076 2076 - number of heads stays the same: 1
2077 2077 """
2078 2078 def csmap(x):
2079 2079 self.ui.debug("add changeset %s\n" % short(x))
2080 2080 return len(cl)
2081 2081
2082 2082 def revmap(x):
2083 2083 return cl.rev(x)
2084 2084
2085 2085 if not source:
2086 2086 return 0
2087 2087
2088 2088 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2089 2089
2090 2090 changesets = files = revisions = 0
2091 2091 efiles = set()
2092 2092
2093 2093 # write changelog data to temp files so concurrent readers will not see
2094 2094 # an inconsistent view
2095 2095 cl = self.changelog
2096 2096 cl.delayupdate()
2097 2097 oldheads = cl.heads()
2098 2098
2099 2099 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2100 2100 try:
2101 2101 trp = weakref.proxy(tr)
2102 2102 # pull off the changeset group
2103 2103 self.ui.status(_("adding changesets\n"))
2104 2104 clstart = len(cl)
2105 2105 class prog(object):
2106 2106 step = _('changesets')
2107 2107 count = 1
2108 2108 ui = self.ui
2109 2109 total = None
2110 2110 def __call__(self):
2111 2111 self.ui.progress(self.step, self.count, unit=_('chunks'),
2112 2112 total=self.total)
2113 2113 self.count += 1
2114 2114 pr = prog()
2115 2115 source.callback = pr
2116 2116
2117 2117 source.changelogheader()
2118 2118 srccontent = cl.addgroup(source, csmap, trp)
2119 2119 if not (srccontent or emptyok):
2120 2120 raise util.Abort(_("received changelog group is empty"))
2121 2121 clend = len(cl)
2122 2122 changesets = clend - clstart
2123 2123 for c in xrange(clstart, clend):
2124 2124 efiles.update(self[c].files())
2125 2125 efiles = len(efiles)
2126 2126 self.ui.progress(_('changesets'), None)
2127 2127
2128 2128 # pull off the manifest group
2129 2129 self.ui.status(_("adding manifests\n"))
2130 2130 pr.step = _('manifests')
2131 2131 pr.count = 1
2132 2132 pr.total = changesets # manifests <= changesets
2133 2133 # no need to check for empty manifest group here:
2134 2134 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2135 2135 # no new manifest will be created and the manifest group will
2136 2136 # be empty during the pull
2137 2137 source.manifestheader()
2138 2138 self.manifest.addgroup(source, revmap, trp)
2139 2139 self.ui.progress(_('manifests'), None)
2140 2140
2141 2141 needfiles = {}
2142 2142 if self.ui.configbool('server', 'validate', default=False):
2143 2143 # validate incoming csets have their manifests
2144 2144 for cset in xrange(clstart, clend):
2145 2145 mfest = self.changelog.read(self.changelog.node(cset))[0]
2146 2146 mfest = self.manifest.readdelta(mfest)
2147 2147 # store file nodes we must see
2148 2148 for f, n in mfest.iteritems():
2149 2149 needfiles.setdefault(f, set()).add(n)
2150 2150
2151 2151 # process the files
2152 2152 self.ui.status(_("adding file changes\n"))
2153 2153 pr.step = _('files')
2154 2154 pr.count = 1
2155 2155 pr.total = efiles
2156 2156 source.callback = None
2157 2157
2158 2158 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2159 2159 pr, needfiles)
2160 2160 revisions += newrevs
2161 2161 files += newfiles
2162 2162
2163 2163 dh = 0
2164 2164 if oldheads:
2165 2165 heads = cl.heads()
2166 2166 dh = len(heads) - len(oldheads)
2167 2167 for h in heads:
2168 2168 if h not in oldheads and self[h].closesbranch():
2169 2169 dh -= 1
2170 2170 htext = ""
2171 2171 if dh:
2172 2172 htext = _(" (%+d heads)") % dh
2173 2173
2174 2174 self.ui.status(_("added %d changesets"
2175 2175 " with %d changes to %d files%s\n")
2176 2176 % (changesets, revisions, files, htext))
2177 2177 self.invalidatevolatilesets()
2178 2178
2179 2179 if changesets > 0:
2180 2180 p = lambda: cl.writepending() and self.root or ""
2181 2181 self.hook('pretxnchangegroup', throw=True,
2182 2182 node=hex(cl.node(clstart)), source=srctype,
2183 2183 url=url, pending=p)
2184 2184
2185 2185 added = [cl.node(r) for r in xrange(clstart, clend)]
2186 2186 publishing = self.ui.configbool('phases', 'publish', True)
2187 2187 if srctype == 'push':
2188 2188 # Old servers cannot push the boundary themselves.
2189 2189 # New servers won't push the boundary if the changeset already
2190 2190 # existed locally as secret.
2191 2191 #
2192 2192 # We should not use 'added' here but the list of all changes
2193 2193 # in the bundle
2194 2194 if publishing:
2195 2195 phases.advanceboundary(self, phases.public, srccontent)
2196 2196 else:
2197 2197 phases.advanceboundary(self, phases.draft, srccontent)
2198 2198 phases.retractboundary(self, phases.draft, added)
2199 2199 elif srctype != 'strip':
2200 2200 # publishing only alters behavior during push
2201 2201 #
2202 2202 # strip should not touch the boundary at all
2203 2203 phases.retractboundary(self, phases.draft, added)
2204 2204
2205 2205 # make changelog see real files again
2206 2206 cl.finalize(trp)
2207 2207
2208 2208 tr.close()
2209 2209
2210 2210 if changesets > 0:
2211 2211 if srctype != 'strip':
2212 2212 # During strip, the branchcache is invalid, but the coming call
2213 2213 # to `destroyed` will repair it.
2214 2214 # In other cases we can safely update the cache on disk.
2215 2215 branchmap.updatecache(self.filtered('served'))
2216 2216 def runhooks():
2217 2217 # These hooks run when the lock releases, not when the
2218 2218 # transaction closes. So it's possible for the changelog
2219 2219 # to have changed since we last saw it.
2220 2220 if clstart >= len(self):
2221 2221 return
2222 2222
2223 2223 # forcefully update the on-disk branch cache
2224 2224 self.ui.debug("updating the branch cache\n")
2225 2225 self.hook("changegroup", node=hex(cl.node(clstart)),
2226 2226 source=srctype, url=url)
2227 2227
2228 2228 for n in added:
2229 2229 self.hook("incoming", node=hex(n), source=srctype,
2230 2230 url=url)
2231 2231
2232 2232 newheads = [h for h in self.heads() if h not in oldheads]
2233 2233 self.ui.log("incoming",
2234 2234 "%s incoming changes - new heads: %s\n",
2235 2235 len(added),
2236 2236 ', '.join([hex(c[:6]) for c in newheads]))
2237 2237 self._afterlock(runhooks)
2238 2238
2239 2239 finally:
2240 2240 tr.release()
2241 2241 # never return 0 here:
2242 2242 if dh < 0:
2243 2243 return dh - 1
2244 2244 else:
2245 2245 return dh + 1
2246 2246
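# --- Editor's illustrative sketch (not part of this module) ---
# Decoding the integer documented in addchangegroup()'s docstring:
def _example_decode_ret(ret):
    if ret == 0:
        return 'nothing changed or no source'
    if ret == 1:
        return 'head count unchanged'
    if ret > 1:
        return '%d heads added' % (ret - 1)
    return '%d heads removed' % (-ret - 1)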
2247 2247 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2248 2248 revisions = 0
2249 2249 files = 0
2250 2250 while True:
2251 2251 chunkdata = source.filelogheader()
2252 2252 if not chunkdata:
2253 2253 break
2254 2254 f = chunkdata["filename"]
2255 2255 self.ui.debug("adding %s revisions\n" % f)
2256 2256 pr()
2257 2257 fl = self.file(f)
2258 2258 o = len(fl)
2259 2259 if not fl.addgroup(source, revmap, trp):
2260 2260 raise util.Abort(_("received file revlog group is empty"))
2261 2261 revisions += len(fl) - o
2262 2262 files += 1
2263 2263 if f in needfiles:
2264 2264 needs = needfiles[f]
2265 2265 for new in xrange(o, len(fl)):
2266 2266 n = fl.node(new)
2267 2267 if n in needs:
2268 2268 needs.remove(n)
2269 2269 else:
2270 2270 raise util.Abort(
2271 2271 _("received spurious file revlog entry"))
2272 2272 if not needs:
2273 2273 del needfiles[f]
2274 2274 self.ui.progress(_('files'), None)
2275 2275
2276 2276 for f, needs in needfiles.iteritems():
2277 2277 fl = self.file(f)
2278 2278 for n in needs:
2279 2279 try:
2280 2280 fl.rev(n)
2281 2281 except error.LookupError:
2282 2282 raise util.Abort(
2283 2283 _('missing file data for %s:%s - run hg verify') %
2284 2284 (f, hex(n)))
2285 2285
2286 2286 return revisions, files
2287 2287
2288 2288 def stream_in(self, remote, requirements):
2289 2289 lock = self.lock()
2290 2290 try:
2291 2291 # Save remote branchmap. We will use it later
2292 2292 # to speed up branchcache creation
2293 2293 rbranchmap = None
2294 2294 if remote.capable("branchmap"):
2295 2295 rbranchmap = remote.branchmap()
2296 2296
2297 2297 fp = remote.stream_out()
2298 2298 l = fp.readline()
2299 2299 try:
2300 2300 resp = int(l)
2301 2301 except ValueError:
2302 2302 raise error.ResponseError(
2303 2303 _('unexpected response from remote server:'), l)
2304 2304 if resp == 1:
2305 2305 raise util.Abort(_('operation forbidden by server'))
2306 2306 elif resp == 2:
2307 2307 raise util.Abort(_('locking the remote repository failed'))
2308 2308 elif resp != 0:
2309 2309 raise util.Abort(_('the server sent an unknown error code'))
2310 2310 self.ui.status(_('streaming all changes\n'))
2311 2311 l = fp.readline()
2312 2312 try:
2313 2313 total_files, total_bytes = map(int, l.split(' ', 1))
2314 2314 except (ValueError, TypeError):
2315 2315 raise error.ResponseError(
2316 2316 _('unexpected response from remote server:'), l)
2317 2317 self.ui.status(_('%d files to transfer, %s of data\n') %
2318 2318 (total_files, util.bytecount(total_bytes)))
2319 2319 handled_bytes = 0
2320 2320 self.ui.progress(_('clone'), 0, total=total_bytes)
2321 2321 start = time.time()
2322 2322 for i in xrange(total_files):
2323 2323 # XXX doesn't support '\n' or '\r' in filenames
2324 2324 l = fp.readline()
2325 2325 try:
2326 2326 name, size = l.split('\0', 1)
2327 2327 size = int(size)
2328 2328 except (ValueError, TypeError):
2329 2329 raise error.ResponseError(
2330 2330 _('unexpected response from remote server:'), l)
2331 2331 if self.ui.debugflag:
2332 2332 self.ui.debug('adding %s (%s)\n' %
2333 2333 (name, util.bytecount(size)))
2334 2334 # for backwards compat, name was partially encoded
2335 2335 ofp = self.sopener(store.decodedir(name), 'w')
2336 2336 for chunk in util.filechunkiter(fp, limit=size):
2337 2337 handled_bytes += len(chunk)
2338 2338 self.ui.progress(_('clone'), handled_bytes,
2339 2339 total=total_bytes)
2340 2340 ofp.write(chunk)
2341 2341 ofp.close()
2342 2342 elapsed = time.time() - start
2343 2343 if elapsed <= 0:
2344 2344 elapsed = 0.001
2345 2345 self.ui.progress(_('clone'), None)
2346 2346 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2347 2347 (util.bytecount(total_bytes), elapsed,
2348 2348 util.bytecount(total_bytes / elapsed)))
2349 2349
2350 2350 # new requirements = old non-format requirements +
2351 2351 #                    new format-related requirements from the
2352 2352 #                    streamed-in repository
2353 2353 requirements.update(set(self.requirements) - self.supportedformats)
2354 2354 self._applyrequirements(requirements)
2355 2355 self._writerequirements()
2356 2356
2357 2357 if rbranchmap:
2358 2358 rbheads = []
2359 2359 for bheads in rbranchmap.itervalues():
2360 2360 rbheads.extend(bheads)
2361 2361
2362 2362 if rbheads:
2363 2363 rtiprev = max((int(self.changelog.rev(node))
2364 2364 for node in rbheads))
2365 2365 cache = branchmap.branchcache(rbranchmap,
2366 2366 self[rtiprev].node(),
2367 2367 rtiprev)
2368 2368 # Try to stick it as low as possible
2369 2369 # filters above 'served' are unlikely to be fetched from a clone
2370 2370 for candidate in ('base', 'immutable', 'served'):
2371 2371 rview = self.filtered(candidate)
2372 2372 if cache.validfor(rview):
2373 2373 self._branchcaches[candidate] = cache
2374 2374 cache.write(rview)
2375 2375 break
2376 2376 self.invalidate()
2377 2377 return len(self.heads()) + 1
2378 2378 finally:
2379 2379 lock.release()
2380 2380
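The wire format consumed by stream_in above is line oriented: a decimal status code, then a `<total_files> <total_bytes>` line, then for each file a `<name>\0<size>` header followed by exactly `size` bytes of store data. A minimal stand-alone reader for that format, assuming a file-like `fp` already past capability negotiation (parse_stream is an illustrative name, not a Mercurial API; the real code above reads each file in chunks so it can report progress):

    def parse_stream(fp):
        # hypothetical reader for the stream_out format sketched above
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('server refused streaming (code %d)' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _ in range(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))  # store payload for this file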
2381 2381 def clone(self, remote, heads=[], stream=False):
2382 2382 '''clone remote repository.
2383 2383
2384 2384 keyword arguments:
2385 2385 heads: list of revs to clone (forces use of pull)
2386 2386 stream: use streaming clone if possible'''
2387 2387
2388 2388 # now, all clients that can request uncompressed clones can
2389 2389 # read repo formats supported by all servers that can serve
2390 2390 # them.
2391 2391
2392 2392 # if revlog format changes, client will have to check version
2393 2393 # and format flags on "stream" capability, and use
2394 2394 # uncompressed only if compatible.
2395 2395
2396 2396 if not stream:
2397 2397 # if the server explicitly prefers to stream (for fast LANs)
2398 2398 stream = remote.capable('stream-preferred')
2399 2399
2400 2400 if stream and not heads:
2401 2401 # 'stream' means remote revlog format is revlogv1 only
2402 2402 if remote.capable('stream'):
2403 2403 return self.stream_in(remote, set(('revlogv1',)))
2404 2404 # otherwise, 'streamreqs' contains the remote revlog format
2405 2405 streamreqs = remote.capable('streamreqs')
2406 2406 if streamreqs:
2407 2407 streamreqs = set(streamreqs.split(','))
2408 2408 # if we support it, stream in and adjust our requirements
2409 2409 if not streamreqs - self.supportedformats:
2410 2410 return self.stream_in(remote, streamreqs)
2411 2411 return self.pull(remote, heads)
2412 2412
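The 'streamreqs' negotiation above is plain set arithmetic: streaming is allowed only when every format requirement the server lists is one the client supports. A small sketch with hypothetical values:

    supported = set(['revlogv1', 'generaldelta'])         # self.supportedformats
    streamreqs = set('revlogv1,generaldelta'.split(','))  # parsed capability value
    can_stream = not (streamreqs - supported)             # True: nothing unsupported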
2413 2413 def pushkey(self, namespace, key, old, new):
2414 2414 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2415 2415 old=old, new=new)
2416 2416 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2417 2417 ret = pushkey.push(self, namespace, key, old, new)
2418 2418 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2419 2419 ret=ret)
2420 2420 return ret
2421 2421
2422 2422 def listkeys(self, namespace):
2423 2423 self.hook('prelistkeys', throw=True, namespace=namespace)
2424 2424 self.ui.debug('listing keys for "%s"\n' % namespace)
2425 2425 values = pushkey.list(self, namespace)
2426 2426 self.hook('listkeys', namespace=namespace, values=values)
2427 2427 return values
2428 2428
2429 2429 def debugwireargs(self, one, two, three=None, four=None, five=None):
2430 2430 '''used to test argument passing over the wire'''
2431 2431 return "%s %s %s %s %s" % (one, two, three, four, five)
2432 2432
2433 2433 def savecommitmessage(self, text):
2434 2434 fp = self.opener('last-message.txt', 'wb')
2435 2435 try:
2436 2436 fp.write(text)
2437 2437 finally:
2438 2438 fp.close()
2439 2439 return self.pathto(fp.name[len(self.root) + 1:])
2440 2440
2441 2441 # used to avoid circular references so destructors work
2442 2442 def aftertrans(files):
2443 2443 renamefiles = [tuple(t) for t in files]
2444 2444 def a():
2445 2445 for vfs, src, dest in renamefiles:
2446 2446 try:
2447 2447 vfs.rename(src, dest)
2448 2448 except OSError: # journal file does not yet exist
2449 2449 pass
2450 2450 return a
2451 2451
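Because the returned closure captures only plain tuples, no repository references are kept alive, which is the point of the comment above. A minimal sketch of how the callback behaves, using a stand-in vfs (printingvfs is illustrative, not a Mercurial class):

    class printingvfs(object):
        # stand-in for a vfs; only rename() is needed by aftertrans
        def rename(self, src, dst):
            print('rename %s -> %s' % (src, dst))

    post = aftertrans([(printingvfs(), 'journal', 'undo')])
    post()  # prints: rename journal -> undo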
2452 2452 def undoname(fn):
2453 2453 base, name = os.path.split(fn)
2454 2454 assert name.startswith('journal')
2455 2455 return os.path.join(base, name.replace('journal', 'undo', 1))
2456 2456
2457 2457 def instance(ui, path, create):
2458 2458 return localrepository(ui, util.urllocalpath(path), create)
2459 2459
2460 2460 def islocal(path):
2461 2461 return True
@@ -1,358 +1,358 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 import scmutil, util, fileset
9 import util, fileset, pathutil
10 10 from i18n import _
11 11
12 12 def _rematcher(pat):
13 13 m = util.compilere(pat)
14 14 try:
15 15 # slightly faster, provided by facebook's re2 bindings
16 16 return m.test_match
17 17 except AttributeError:
18 18 return m.match
19 19
20 20 def _expandsets(pats, ctx):
21 21 '''convert set: patterns into a list of files in the given context'''
22 22 fset = set()
23 23 other = []
24 24
25 25 for kind, expr in pats:
26 26 if kind == 'set':
27 27 if not ctx:
28 28 raise util.Abort("fileset expression with no context")
29 29 s = fileset.getfileset(ctx, expr)
30 30 fset.update(s)
31 31 continue
32 32 other.append((kind, expr))
33 33 return fset, other
34 34
35 35 class match(object):
36 36 def __init__(self, root, cwd, patterns, include=[], exclude=[],
37 37 default='glob', exact=False, auditor=None, ctx=None):
38 38 """build an object to match a set of file patterns
39 39
40 40 arguments:
41 41 root - the canonical root of the tree you're matching against
42 42 cwd - the current working directory, if relevant
43 43 patterns - patterns to find
44 44 include - patterns to include
45 45 exclude - patterns to exclude
46 46 default - if a pattern in names has no explicit type, assume this one
47 47 exact - patterns are actually literals
48 48
49 49 a pattern is one of:
50 50 'glob:<glob>' - a glob relative to cwd
51 51 're:<regexp>' - a regular expression
52 52 'path:<path>' - a path relative to repository root
53 53 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
54 54 'relpath:<path>' - a path relative to cwd
55 55 'relre:<regexp>' - a regexp that needn't match the start of a name
56 56 'set:<fileset>' - a fileset expression
57 57 '<something>' - a pattern of the specified default type
58 58 """
59 59
60 60 self._root = root
61 61 self._cwd = cwd
62 62 self._files = []
63 63 self._anypats = bool(include or exclude)
64 64 self._ctx = ctx
65 65 self._always = False
66 66
67 67 if include:
68 68 pats = _normalize(include, 'glob', root, cwd, auditor)
69 69 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
70 70 if exclude:
71 71 pats = _normalize(exclude, 'glob', root, cwd, auditor)
72 72 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
73 73 if exact:
74 74 if isinstance(patterns, list):
75 75 self._files = patterns
76 76 else:
77 77 self._files = list(patterns)
78 78 pm = self.exact
79 79 elif patterns:
80 80 pats = _normalize(patterns, default, root, cwd, auditor)
81 81 self._files = _roots(pats)
82 82 self._anypats = self._anypats or _anypats(pats)
83 83 self.patternspat, pm = _buildmatch(ctx, pats, '$')
84 84
85 85 if patterns or exact:
86 86 if include:
87 87 if exclude:
88 88 m = lambda f: im(f) and not em(f) and pm(f)
89 89 else:
90 90 m = lambda f: im(f) and pm(f)
91 91 else:
92 92 if exclude:
93 93 m = lambda f: not em(f) and pm(f)
94 94 else:
95 95 m = pm
96 96 else:
97 97 if include:
98 98 if exclude:
99 99 m = lambda f: im(f) and not em(f)
100 100 else:
101 101 m = im
102 102 else:
103 103 if exclude:
104 104 m = lambda f: not em(f)
105 105 else:
106 106 m = lambda f: True
107 107 self._always = True
108 108
109 109 self.matchfn = m
110 110 self._fmap = set(self._files)
111 111
112 112 def __call__(self, fn):
113 113 return self.matchfn(fn)
114 114 def __iter__(self):
115 115 for f in self._files:
116 116 yield f
117 117 def bad(self, f, msg):
118 118 '''callback for each explicit file that can't be
119 119 found/accessed, with an error message
120 120 '''
121 121 pass
122 122 # If this is set, it will be called when an explicitly listed directory is
123 123 # visited.
124 124 explicitdir = None
125 125 # If this is set, it will be called when a directory discovered by recursive
126 126 # traversal is visited.
127 127 traversedir = None
128 128 def missing(self, f):
129 129 pass
130 130 def exact(self, f):
131 131 return f in self._fmap
132 132 def rel(self, f):
133 133 return util.pathto(self._root, self._cwd, f)
134 134 def files(self):
135 135 return self._files
136 136 def anypats(self):
137 137 return self._anypats
138 138 def always(self):
139 139 return self._always
140 140
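Since 're:' patterns bypass canonicalization in _normalize, a matcher can be exercised without touching the filesystem. A minimal sketch, with a hypothetical root:

    m = match('/repo', '', [r're:.*\.py$'])
    bool(m('src/x.py'))   # True
    bool(m('README'))     # False
    m.files()             # ['.'] - a regex contributes no literal root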
141 141 class exact(match):
142 142 def __init__(self, root, cwd, files):
143 143 match.__init__(self, root, cwd, files, exact=True)
144 144
145 145 class always(match):
146 146 def __init__(self, root, cwd):
147 147 match.__init__(self, root, cwd, [])
148 148 self._always = True
149 149
150 150 class narrowmatcher(match):
151 151 """Adapt a matcher to work on a subdirectory only.
152 152
153 153 The paths are remapped to remove/insert the path as needed:
154 154
155 155 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
156 156 >>> m2 = narrowmatcher('sub', m1)
157 157 >>> bool(m2('a.txt'))
158 158 False
159 159 >>> bool(m2('b.txt'))
160 160 True
161 161 >>> bool(m2.matchfn('a.txt'))
162 162 False
163 163 >>> bool(m2.matchfn('b.txt'))
164 164 True
165 165 >>> m2.files()
166 166 ['b.txt']
167 167 >>> m2.exact('b.txt')
168 168 True
169 169 >>> m2.rel('b.txt')
170 170 'b.txt'
171 171 >>> def bad(f, msg):
172 172 ... print "%s: %s" % (f, msg)
173 173 >>> m1.bad = bad
174 174 >>> m2.bad('x.txt', 'No such file')
175 175 sub/x.txt: No such file
176 176 """
177 177
178 178 def __init__(self, path, matcher):
179 179 self._root = matcher._root
180 180 self._cwd = matcher._cwd
181 181 self._path = path
182 182 self._matcher = matcher
183 183 self._always = matcher._always
184 184
185 185 self._files = [f[len(path) + 1:] for f in matcher._files
186 186 if f.startswith(path + "/")]
187 187 self._anypats = matcher._anypats
188 188 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
189 189 self._fmap = set(self._files)
190 190
191 191 def bad(self, f, msg):
192 192 self._matcher.bad(self._path + "/" + f, msg)
193 193
194 194 def patkind(pat):
195 195 return _patsplit(pat, None)[0]
196 196
197 197 def _patsplit(pat, default):
198 198 """Split a string into an optional pattern kind prefix and the
199 199 actual pattern."""
200 200 if ':' in pat:
201 201 kind, val = pat.split(':', 1)
202 202 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
203 203 'listfile', 'listfile0', 'set'):
204 204 return kind, val
205 205 return default, pat
206 206
207 207 def _globre(pat):
208 208 "convert a glob pattern into a regexp"
209 209 i, n = 0, len(pat)
210 210 res = ''
211 211 group = 0
212 212 escape = re.escape
213 213 def peek():
214 214 return i < n and pat[i]
215 215 while i < n:
216 216 c = pat[i]
217 217 i += 1
218 218 if c not in '*?[{},\\':
219 219 res += escape(c)
220 220 elif c == '*':
221 221 if peek() == '*':
222 222 i += 1
223 223 res += '.*'
224 224 else:
225 225 res += '[^/]*'
226 226 elif c == '?':
227 227 res += '.'
228 228 elif c == '[':
229 229 j = i
230 230 if j < n and pat[j] in '!]':
231 231 j += 1
232 232 while j < n and pat[j] != ']':
233 233 j += 1
234 234 if j >= n:
235 235 res += '\\['
236 236 else:
237 237 stuff = pat[i:j].replace('\\','\\\\')
238 238 i = j + 1
239 239 if stuff[0] == '!':
240 240 stuff = '^' + stuff[1:]
241 241 elif stuff[0] == '^':
242 242 stuff = '\\' + stuff
243 243 res = '%s[%s]' % (res, stuff)
244 244 elif c == '{':
245 245 group += 1
246 246 res += '(?:'
247 247 elif c == '}' and group:
248 248 res += ')'
249 249 group -= 1
250 250 elif c == ',' and group:
251 251 res += '|'
252 252 elif c == '\\':
253 253 p = peek()
254 254 if p:
255 255 i += 1
256 256 res += escape(p)
257 257 else:
258 258 res += escape(c)
259 259 else:
260 260 res += escape(c)
261 261 return res
262 262
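For reference, a few translations produced by _globre; the escaping of literal characters comes from re.escape, so the exact output can vary slightly by Python version:

    _globre('*.c')        # -> '[^/]*\.c'       (* does not cross slashes)
    _globre('src/**')     # -> roughly 'src/.*' (** crosses slashes)
    _globre('f{oo,un}')   # -> 'f(?:oo|un)'     (brace alternation)
    _globre('[!a-z]')     # -> '[^a-z]'         (negated character class)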
263 263 def _regex(kind, name, tail):
264 264 '''convert a pattern into a regular expression'''
265 265 if not name:
266 266 return ''
267 267 if kind == 're':
268 268 return name
269 269 elif kind == 'path':
270 270 return '^' + re.escape(name) + '(?:/|$)'
271 271 elif kind == 'relglob':
272 272 return '(?:|.*/)' + _globre(name) + tail
273 273 elif kind == 'relpath':
274 274 return re.escape(name) + '(?:/|$)'
275 275 elif kind == 'relre':
276 276 if name.startswith('^'):
277 277 return name
278 278 return '.*' + name
279 279 return _globre(name) + tail
280 280
281 281 def _buildmatch(ctx, pats, tail):
282 282 fset, pats = _expandsets(pats, ctx)
283 283 if not pats:
284 284 return "", fset.__contains__
285 285
286 286 pat, mf = _buildregexmatch(pats, tail)
287 287 if fset:
288 288 return pat, lambda f: f in fset or mf(f)
289 289 return pat, mf
290 290
291 291 def _buildregexmatch(pats, tail):
292 292 """build a matching function from a set of patterns"""
293 293 try:
294 294 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
295 295 if len(pat) > 20000:
296 296 raise OverflowError
297 297 return pat, _rematcher(pat)
298 298 except OverflowError:
299 299 # We're using a Python with a tiny regex engine and we
300 300 # made it explode, so we'll divide the pattern list in two
301 301 # until it works
302 302 l = len(pats)
303 303 if l < 2:
304 304 raise
305 305 pata, a = _buildregexmatch(pats[:l//2], tail)
306 306 patb, b = _buildregexmatch(pats[l//2:], tail)
307 307 return pat, lambda s: a(s) or b(s)
308 308 except re.error:
309 309 for k, p in pats:
310 310 try:
311 311 _rematcher('(?:%s)' % _regex(k, p, tail))
312 312 except re.error:
313 313 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
314 314 raise util.Abort(_("invalid pattern"))
315 315
316 316 def _normalize(names, default, root, cwd, auditor):
317 317 pats = []
318 318 for kind, name in [_patsplit(p, default) for p in names]:
319 319 if kind in ('glob', 'relpath'):
320 name = scmutil.canonpath(root, cwd, name, auditor)
320 name = pathutil.canonpath(root, cwd, name, auditor)
321 321 elif kind in ('relglob', 'path'):
322 322 name = util.normpath(name)
323 323 elif kind in ('listfile', 'listfile0'):
324 324 try:
325 325 files = util.readfile(name)
326 326 if kind == 'listfile0':
327 327 files = files.split('\0')
328 328 else:
329 329 files = files.splitlines()
330 330 files = [f for f in files if f]
331 331 except EnvironmentError:
332 332 raise util.Abort(_("unable to read file list (%s)") % name)
333 333 pats += _normalize(files, default, root, cwd, auditor)
334 334 continue
335 335
336 336 pats.append((kind, name))
337 337 return pats
338 338
339 339 def _roots(patterns):
340 340 r = []
341 341 for kind, name in patterns:
342 342 if kind == 'glob': # find the non-glob prefix
343 343 root = []
344 344 for p in name.split('/'):
345 345 if '[' in p or '{' in p or '*' in p or '?' in p:
346 346 break
347 347 root.append(p)
348 348 r.append('/'.join(root) or '.')
349 349 elif kind in ('relpath', 'path'):
350 350 r.append(name or '.')
351 351 else: # relglob, re, relre
352 352 r.append('.')
353 353 return r
354 354
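_roots keeps the longest literal prefix of each pattern so walking can be restricted to those directories; anything fully dynamic falls back to '.'. With hypothetical inputs:

    _roots([('glob', 'src/*.c'), ('relpath', 'docs'), ('relre', 'a+')])
    # -> ['src', 'docs', '.']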
355 355 def _anypats(patterns):
356 356 for kind, name in patterns:
357 357 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
358 358 return True
@@ -1,1025 +1,886 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 12 import match as matchmod
12 import os, errno, re, stat, glob
13 import os, errno, re, glob
13 14
14 15 if os.name == 'nt':
15 16 import scmwindows as scmplatform
16 17 else:
17 18 import scmposix as scmplatform
18 19
19 20 systemrcpath = scmplatform.systemrcpath
20 21 userrcpath = scmplatform.userrcpath
21 22
22 23 def nochangesfound(ui, repo, excluded=None):
23 24 '''Report no changes for push/pull, excluded is None or a list of
24 25 nodes excluded from the push/pull.
25 26 '''
26 27 secretlist = []
27 28 if excluded:
28 29 for n in excluded:
29 30 if n not in repo:
30 31 # discovery should not have included the filtered revision,
31 32 # we have to explicitly exclude it until discovery is cleaned up.
32 33 continue
33 34 ctx = repo[n]
34 35 if ctx.phase() >= phases.secret and not ctx.extinct():
35 36 secretlist.append(n)
36 37
37 38 if secretlist:
38 39 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 40 % len(secretlist))
40 41 else:
41 42 ui.status(_("no changes found\n"))
42 43
43 44 def checknewlabel(repo, lbl, kind):
44 45 # Do not use the "kind" parameter in ui output.
45 46 # It makes strings difficult to translate.
46 47 if lbl in ['tip', '.', 'null']:
47 48 raise util.Abort(_("the name '%s' is reserved") % lbl)
48 49 for c in (':', '\0', '\n', '\r'):
49 50 if c in lbl:
50 51 raise util.Abort(_("%r cannot be used in a name") % c)
51 52 try:
52 53 int(lbl)
53 54 raise util.Abort(_("cannot use an integer as a name"))
54 55 except ValueError:
55 56 pass
56 57
57 58 def checkfilename(f):
58 59 '''Check that the filename f is an acceptable filename for a tracked file'''
59 60 if '\r' in f or '\n' in f:
60 61 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
61 62
62 63 def checkportable(ui, f):
63 64 '''Check if filename f is portable and warn or abort depending on config'''
64 65 checkfilename(f)
65 66 abort, warn = checkportabilityalert(ui)
66 67 if abort or warn:
67 68 msg = util.checkwinfilename(f)
68 69 if msg:
69 70 msg = "%s: %r" % (msg, f)
70 71 if abort:
71 72 raise util.Abort(msg)
72 73 ui.warn(_("warning: %s\n") % msg)
73 74
74 75 def checkportabilityalert(ui):
75 76 '''check if the user's config requests nothing, a warning, or abort for
76 77 non-portable filenames'''
77 78 val = ui.config('ui', 'portablefilenames', 'warn')
78 79 lval = val.lower()
79 80 bval = util.parsebool(val)
80 81 abort = os.name == 'nt' or lval == 'abort'
81 82 warn = bval or lval == 'warn'
82 83 if bval is None and not (warn or abort or lval == 'ignore'):
83 84 raise error.ConfigError(
84 85 _("ui.portablefilenames value is invalid ('%s')") % val)
85 86 return abort, warn
86 87
87 88 class casecollisionauditor(object):
88 89 def __init__(self, ui, abort, dirstate):
89 90 self._ui = ui
90 91 self._abort = abort
91 92 allfiles = '\0'.join(dirstate._map)
92 93 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
93 94 self._dirstate = dirstate
94 95 # The purpose of _newfiles is to avoid complaining about case
95 96 # collisions when this object is called with the same filename
96 97 # twice.
97 98 self._newfiles = set()
98 99
99 100 def __call__(self, f):
100 101 if f in self._newfiles:
101 102 return
102 103 fl = encoding.lower(f)
103 104 if fl in self._loweredfiles and f not in self._dirstate:
104 105 msg = _('possible case-folding collision for %s') % f
105 106 if self._abort:
106 107 raise util.Abort(msg)
107 108 self._ui.warn(_("warning: %s\n") % msg)
108 109 self._loweredfiles.add(fl)
109 110 self._newfiles.add(f)
110 111
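The auditor only needs a ui with warn() and a dirstate exposing _map and membership, so its behaviour can be sketched with stubs (stubui and stubdirstate are illustrative):

    class stubui(object):
        def warn(self, msg):
            print(msg)

    class stubdirstate(dict):
        _map = {'README': None}   # files already tracked

    aud = casecollisionauditor(stubui(), False, stubdirstate())
    aud('ReadMe')   # warning: possible case-folding collision for ReadMe
    aud('NEWS')     # silent: no lowercase clash with tracked files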
111 class pathauditor(object):
112 '''ensure that a filesystem path contains no banned components.
113 the following properties of a path are checked:
114
115 - ends with a directory separator
116 - under top-level .hg
117 - starts at the root of a windows drive
118 - contains ".."
119 - traverses a symlink (e.g. a/symlink_here/b)
120 - inside a nested repository (a callback can be used to approve
121 some nested repositories, e.g., subrepositories)
122 '''
123
124 def __init__(self, root, callback=None):
125 self.audited = set()
126 self.auditeddir = set()
127 self.root = root
128 self.callback = callback
129 if os.path.lexists(root) and not util.checkcase(root):
130 self.normcase = util.normcase
131 else:
132 self.normcase = lambda x: x
133
134 def __call__(self, path):
135 '''Check the relative path.
136 path may contain a pattern (e.g. foodir/**.txt)'''
137
138 path = util.localpath(path)
139 normpath = self.normcase(path)
140 if normpath in self.audited:
141 return
142 # AIX ignores "/" at end of path, others raise EISDIR.
143 if util.endswithsep(path):
144 raise util.Abort(_("path ends in directory separator: %s") % path)
145 parts = util.splitpath(path)
146 if (os.path.splitdrive(path)[0]
147 or parts[0].lower() in ('.hg', '.hg.', '')
148 or os.pardir in parts):
149 raise util.Abort(_("path contains illegal component: %s") % path)
150 if '.hg' in path.lower():
151 lparts = [p.lower() for p in parts]
152 for p in '.hg', '.hg.':
153 if p in lparts[1:]:
154 pos = lparts.index(p)
155 base = os.path.join(*parts[:pos])
156 raise util.Abort(_("path '%s' is inside nested repo %r")
157 % (path, base))
158
159 normparts = util.splitpath(normpath)
160 assert len(parts) == len(normparts)
161
162 parts.pop()
163 normparts.pop()
164 prefixes = []
165 while parts:
166 prefix = os.sep.join(parts)
167 normprefix = os.sep.join(normparts)
168 if normprefix in self.auditeddir:
169 break
170 curpath = os.path.join(self.root, prefix)
171 try:
172 st = os.lstat(curpath)
173 except OSError, err:
174 # EINVAL can be raised as invalid path syntax under win32.
175 # They must be ignored for patterns can be checked too.
176 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
177 raise
178 else:
179 if stat.S_ISLNK(st.st_mode):
180 raise util.Abort(
181 _('path %r traverses symbolic link %r')
182 % (path, prefix))
183 elif (stat.S_ISDIR(st.st_mode) and
184 os.path.isdir(os.path.join(curpath, '.hg'))):
185 if not self.callback or not self.callback(curpath):
186 raise util.Abort(_("path '%s' is inside nested "
187 "repo %r")
188 % (path, prefix))
189 prefixes.append(normprefix)
190 parts.pop()
191 normparts.pop()
192
193 self.audited.add(normpath)
194 # only add prefixes to the cache after checking everything: we don't
195 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
196 self.auditeddir.update(prefixes)
197
198 def check(self, path):
199 try:
200 self(path)
201 return True
202 except (OSError, util.Abort):
203 return False
204
205 112 class abstractvfs(object):
206 113 """Abstract base class; cannot be instantiated"""
207 114
208 115 def __init__(self, *args, **kwargs):
209 116 '''Prevent instantiation; don't call this from subclasses.'''
210 117 raise NotImplementedError('attempted instantiating ' + str(type(self)))
211 118
212 119 def tryread(self, path):
213 120 '''gracefully return an empty string for missing files'''
214 121 try:
215 122 return self.read(path)
216 123 except IOError, inst:
217 124 if inst.errno != errno.ENOENT:
218 125 raise
219 126 return ""
220 127
221 128 def open(self, path, mode="r", text=False, atomictemp=False):
222 129 self.open = self.__call__
223 130 return self.__call__(path, mode, text, atomictemp)
224 131
225 132 def read(self, path):
226 133 fp = self(path, 'rb')
227 134 try:
228 135 return fp.read()
229 136 finally:
230 137 fp.close()
231 138
232 139 def write(self, path, data):
233 140 fp = self(path, 'wb')
234 141 try:
235 142 return fp.write(data)
236 143 finally:
237 144 fp.close()
238 145
239 146 def append(self, path, data):
240 147 fp = self(path, 'ab')
241 148 try:
242 149 return fp.write(data)
243 150 finally:
244 151 fp.close()
245 152
246 153 def exists(self, path=None):
247 154 return os.path.exists(self.join(path))
248 155
249 156 def fstat(self, fp):
250 157 return util.fstat(fp)
251 158
252 159 def isdir(self, path=None):
253 160 return os.path.isdir(self.join(path))
254 161
255 162 def islink(self, path=None):
256 163 return os.path.islink(self.join(path))
257 164
258 165 def lstat(self, path=None):
259 166 return os.lstat(self.join(path))
260 167
261 168 def makedir(self, path=None, notindexed=True):
262 169 return util.makedir(self.join(path), notindexed)
263 170
264 171 def makedirs(self, path=None, mode=None):
265 172 return util.makedirs(self.join(path), mode)
266 173
267 174 def mkdir(self, path=None):
268 175 return os.mkdir(self.join(path))
269 176
270 177 def readdir(self, path=None, stat=None, skip=None):
271 178 return osutil.listdir(self.join(path), stat, skip)
272 179
273 180 def rename(self, src, dst):
274 181 return util.rename(self.join(src), self.join(dst))
275 182
276 183 def readlink(self, path):
277 184 return os.readlink(self.join(path))
278 185
279 186 def setflags(self, path, l, x):
280 187 return util.setflags(self.join(path), l, x)
281 188
282 189 def stat(self, path=None):
283 190 return os.stat(self.join(path))
284 191
285 192 def unlink(self, path=None):
286 193 return util.unlink(self.join(path))
287 194
288 195 def utime(self, path=None, t=None):
289 196 return os.utime(self.join(path), t)
290 197
291 198 class vfs(abstractvfs):
292 199 '''Operate files relative to a base directory
293 200
294 201 This class is used to hide the details of COW semantics and
295 202 remote file access from higher level code.
296 203 '''
297 204 def __init__(self, base, audit=True, expandpath=False, realpath=False):
298 205 if expandpath:
299 206 base = util.expandpath(base)
300 207 if realpath:
301 208 base = os.path.realpath(base)
302 209 self.base = base
303 210 self._setmustaudit(audit)
304 211 self.createmode = None
305 212 self._trustnlink = None
306 213
307 214 def _getmustaudit(self):
308 215 return self._audit
309 216
310 217 def _setmustaudit(self, onoff):
311 218 self._audit = onoff
312 219 if onoff:
313 self.audit = pathauditor(self.base)
220 self.audit = pathutil.pathauditor(self.base)
314 221 else:
315 222 self.audit = util.always
316 223
317 224 mustaudit = property(_getmustaudit, _setmustaudit)
318 225
319 226 @util.propertycache
320 227 def _cansymlink(self):
321 228 return util.checklink(self.base)
322 229
323 230 @util.propertycache
324 231 def _chmod(self):
325 232 return util.checkexec(self.base)
326 233
327 234 def _fixfilemode(self, name):
328 235 if self.createmode is None or not self._chmod:
329 236 return
330 237 os.chmod(name, self.createmode & 0666)
331 238
332 239 def __call__(self, path, mode="r", text=False, atomictemp=False):
333 240 if self._audit:
334 241 r = util.checkosfilename(path)
335 242 if r:
336 243 raise util.Abort("%s: %r" % (r, path))
337 244 self.audit(path)
338 245 f = self.join(path)
339 246
340 247 if not text and "b" not in mode:
341 248 mode += "b" # for that other OS
342 249
343 250 nlink = -1
344 251 if mode not in ('r', 'rb'):
345 252 dirname, basename = util.split(f)
346 253 # If basename is empty, then the path is malformed because it points
347 254 # to a directory. Let the posixfile() call below raise IOError.
348 255 if basename:
349 256 if atomictemp:
350 257 util.ensuredirs(dirname, self.createmode)
351 258 return util.atomictempfile(f, mode, self.createmode)
352 259 try:
353 260 if 'w' in mode:
354 261 util.unlink(f)
355 262 nlink = 0
356 263 else:
357 264 # nlinks() may behave differently for files on Windows
358 265 # shares if the file is open.
359 266 fd = util.posixfile(f)
360 267 nlink = util.nlinks(f)
361 268 if nlink < 1:
362 269 nlink = 2 # force mktempcopy (issue1922)
363 270 fd.close()
364 271 except (OSError, IOError), e:
365 272 if e.errno != errno.ENOENT:
366 273 raise
367 274 nlink = 0
368 275 util.ensuredirs(dirname, self.createmode)
369 276 if nlink > 0:
370 277 if self._trustnlink is None:
371 278 self._trustnlink = nlink > 1 or util.checknlink(f)
372 279 if nlink > 1 or not self._trustnlink:
373 280 util.rename(util.mktempcopy(f), f)
374 281 fp = util.posixfile(f, mode)
375 282 if nlink == 0:
376 283 self._fixfilemode(f)
377 284 return fp
378 285
379 286 def symlink(self, src, dst):
380 287 self.audit(dst)
381 288 linkname = self.join(dst)
382 289 try:
383 290 os.unlink(linkname)
384 291 except OSError:
385 292 pass
386 293
387 294 util.ensuredirs(os.path.dirname(linkname), self.createmode)
388 295
389 296 if self._cansymlink:
390 297 try:
391 298 os.symlink(src, linkname)
392 299 except OSError, err:
393 300 raise OSError(err.errno, _('could not symlink to %r: %s') %
394 301 (src, err.strerror), linkname)
395 302 else:
396 303 self.write(dst, src)
397 304
398 305 def join(self, path):
399 306 if path:
400 307 return os.path.join(self.base, path)
401 308 else:
402 309 return self.base
403 310
404 311 opener = vfs
405 312
406 313 class auditvfs(object):
407 314 def __init__(self, vfs):
408 315 self.vfs = vfs
409 316
410 317 def _getmustaudit(self):
411 318 return self.vfs.mustaudit
412 319
413 320 def _setmustaudit(self, onoff):
414 321 self.vfs.mustaudit = onoff
415 322
416 323 mustaudit = property(_getmustaudit, _setmustaudit)
417 324
418 325 class filtervfs(abstractvfs, auditvfs):
419 326 '''Wrapper vfs for filtering filenames with a function.'''
420 327
421 328 def __init__(self, vfs, filter):
422 329 auditvfs.__init__(self, vfs)
423 330 self._filter = filter
424 331
425 332 def __call__(self, path, *args, **kwargs):
426 333 return self.vfs(self._filter(path), *args, **kwargs)
427 334
428 335 def join(self, path):
429 336 if path:
430 337 return self.vfs.join(self._filter(path))
431 338 else:
432 339 return self.vfs.join(path)
433 340
434 341 filteropener = filtervfs
435 342
436 343 class readonlyvfs(abstractvfs, auditvfs):
437 344 '''Wrapper vfs preventing any writing.'''
438 345
439 346 def __init__(self, vfs):
440 347 auditvfs.__init__(self, vfs)
441 348
442 349 def __call__(self, path, mode='r', *args, **kw):
443 350 if mode not in ('r', 'rb'):
444 351 raise util.Abort('this vfs is read only')
445 352 return self.vfs(path, mode, *args, **kw)
446 353
447 354
448 def canonpath(root, cwd, myname, auditor=None):
449 '''return the canonical path of myname, given cwd and root'''
450 if util.endswithsep(root):
451 rootsep = root
452 else:
453 rootsep = root + os.sep
454 name = myname
455 if not os.path.isabs(name):
456 name = os.path.join(root, cwd, name)
457 name = os.path.normpath(name)
458 if auditor is None:
459 auditor = pathauditor(root)
460 if name != rootsep and name.startswith(rootsep):
461 name = name[len(rootsep):]
462 auditor(name)
463 return util.pconvert(name)
464 elif name == root:
465 return ''
466 else:
467 # Determine whether `name' is in the hierarchy at or beneath `root',
468 # by iterating name=dirname(name) until that causes no change (can't
469 # check name == '/', because that doesn't work on windows). The list
470 # `rel' holds the reversed list of components making up the relative
471 # file name we want.
472 rel = []
473 while True:
474 try:
475 s = util.samefile(name, root)
476 except OSError:
477 s = False
478 if s:
479 if not rel:
480 # name was actually the same as root (maybe a symlink)
481 return ''
482 rel.reverse()
483 name = os.path.join(*rel)
484 auditor(name)
485 return util.pconvert(name)
486 dirname, basename = util.split(name)
487 rel.append(basename)
488 if dirname == name:
489 break
490 name = dirname
491
492 raise util.Abort(_("%s not under root '%s'") % (myname, root))
493
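This is the canonpath being relocated into the new pathutil module by this changeset. Its contract is easiest to see with hypothetical calls on a POSIX layout, assuming a repository rooted at /repo:

    canonpath('/repo', 'src', 'main.c')         # -> 'src/main.c'
    canonpath('/repo', '', '/repo/docs/a.txt')  # -> 'docs/a.txt'
    canonpath('/repo', '', '/etc/passwd')       # aborts: not under root '/repo'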
494 355 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
495 356 '''yield every hg repository under path, always recursively.
496 357 The recurse flag will only control recursion into repo working dirs'''
497 358 def errhandler(err):
498 359 if err.filename == path:
499 360 raise err
500 361 samestat = getattr(os.path, 'samestat', None)
501 362 if followsym and samestat is not None:
502 363 def adddir(dirlst, dirname):
503 364 match = False
504 365 dirstat = os.stat(dirname)
505 366 for lstdirstat in dirlst:
506 367 if samestat(dirstat, lstdirstat):
507 368 match = True
508 369 break
509 370 if not match:
510 371 dirlst.append(dirstat)
511 372 return not match
512 373 else:
513 374 followsym = False
514 375
515 376 if (seen_dirs is None) and followsym:
516 377 seen_dirs = []
517 378 adddir(seen_dirs, path)
518 379 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
519 380 dirs.sort()
520 381 if '.hg' in dirs:
521 382 yield root # found a repository
522 383 qroot = os.path.join(root, '.hg', 'patches')
523 384 if os.path.isdir(os.path.join(qroot, '.hg')):
524 385 yield qroot # we have a patch queue repo here
525 386 if recurse:
526 387 # avoid recursing inside the .hg directory
527 388 dirs.remove('.hg')
528 389 else:
529 390 dirs[:] = [] # don't descend further
530 391 elif followsym:
531 392 newdirs = []
532 393 for d in dirs:
533 394 fname = os.path.join(root, d)
534 395 if adddir(seen_dirs, fname):
535 396 if os.path.islink(fname):
536 397 for hgname in walkrepos(fname, True, seen_dirs):
537 398 yield hgname
538 399 else:
539 400 newdirs.append(d)
540 401 dirs[:] = newdirs
541 402
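walkrepos is a generator, so enumerating every repository (including MQ patch-queue repos) under a directory is a one-liner; a hypothetical scan:

    for repo in walkrepos('/srv/hg', followsym=True):
        print(repo)   # e.g. /srv/hg/project, then /srv/hg/project/.hg/patches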
542 403 def osrcpath():
543 404 '''return default os-specific hgrc search path'''
544 405 path = systemrcpath()
545 406 path.extend(userrcpath())
546 407 path = [os.path.normpath(f) for f in path]
547 408 return path
548 409
549 410 _rcpath = None
550 411
551 412 def rcpath():
552 413 '''return hgrc search path. if env var HGRCPATH is set, use it.
553 414 for each item in path, if directory, use files ending in .rc,
554 415 else use item.
555 416 make HGRCPATH empty to only look in .hg/hgrc of current repo.
556 417 if no HGRCPATH, use default os-specific path.'''
557 418 global _rcpath
558 419 if _rcpath is None:
559 420 if 'HGRCPATH' in os.environ:
560 421 _rcpath = []
561 422 for p in os.environ['HGRCPATH'].split(os.pathsep):
562 423 if not p:
563 424 continue
564 425 p = util.expandpath(p)
565 426 if os.path.isdir(p):
566 427 for f, kind in osutil.listdir(p):
567 428 if f.endswith('.rc'):
568 429 _rcpath.append(os.path.join(p, f))
569 430 else:
570 431 _rcpath.append(p)
571 432 else:
572 433 _rcpath = osrcpath()
573 434 return _rcpath
574 435
575 436 def revsingle(repo, revspec, default='.'):
576 437 if not revspec and revspec != 0:
577 438 return repo[default]
578 439
579 440 l = revrange(repo, [revspec])
580 441 if len(l) < 1:
581 442 raise util.Abort(_('empty revision set'))
582 443 return repo[l[-1]]
583 444
584 445 def revpair(repo, revs):
585 446 if not revs:
586 447 return repo.dirstate.p1(), None
587 448
588 449 l = revrange(repo, revs)
589 450
590 451 if len(l) == 0:
591 452 if revs:
592 453 raise util.Abort(_('empty revision range'))
593 454 return repo.dirstate.p1(), None
594 455
595 456 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
596 457 return repo.lookup(l[0]), None
597 458
598 459 return repo.lookup(l[0]), repo.lookup(l[-1])
599 460
600 461 _revrangesep = ':'
601 462
602 463 def revrange(repo, revs):
603 464 """Yield revision as strings from a list of revision specifications."""
604 465
605 466 def revfix(repo, val, defval):
606 467 if not val and val != 0 and defval is not None:
607 468 return defval
608 469 return repo[val].rev()
609 470
610 471 seen, l = set(), []
611 472 for spec in revs:
612 473 if l and not seen:
613 474 seen = set(l)
614 475 # attempt to parse old-style ranges first to deal with
615 476 # things like old-tag which contain query metacharacters
616 477 try:
617 478 if isinstance(spec, int):
618 479 seen.add(spec)
619 480 l.append(spec)
620 481 continue
621 482
622 483 if _revrangesep in spec:
623 484 start, end = spec.split(_revrangesep, 1)
624 485 start = revfix(repo, start, 0)
625 486 end = revfix(repo, end, len(repo) - 1)
626 487 if end == nullrev and start <= 0:
627 488 start = nullrev
628 489 rangeiter = repo.changelog.revs(start, end)
629 490 if not seen and not l:
630 491 # by far the most common case: revs = ["-1:0"]
631 492 l = list(rangeiter)
632 493 # defer syncing seen until next iteration
633 494 continue
634 495 newrevs = set(rangeiter)
635 496 if seen:
636 497 newrevs.difference_update(seen)
637 498 seen.update(newrevs)
638 499 else:
639 500 seen = newrevs
640 501 l.extend(sorted(newrevs, reverse=start > end))
641 502 continue
642 503 elif spec and spec in repo: # single unquoted rev
643 504 rev = revfix(repo, spec, None)
644 505 if rev in seen:
645 506 continue
646 507 seen.add(rev)
647 508 l.append(rev)
648 509 continue
649 510 except error.RepoLookupError:
650 511 pass
651 512
652 513 # fall through to new-style queries if old-style fails
653 514 m = revset.match(repo.ui, spec)
654 515 dl = [r for r in m(repo, list(repo)) if r not in seen]
655 516 l.extend(dl)
656 517 seen.update(dl)
657 518
658 519 return l
659 520
660 521 def expandpats(pats):
661 522 if not util.expandglobs:
662 523 return list(pats)
663 524 ret = []
664 525 for p in pats:
665 526 kind, name = matchmod._patsplit(p, None)
666 527 if kind is None:
667 528 try:
668 529 globbed = glob.glob(name)
669 530 except re.error:
670 531 globbed = [name]
671 532 if globbed:
672 533 ret.extend(globbed)
673 534 continue
674 535 ret.append(p)
675 536 return ret
676 537
677 538 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
678 539 if pats == ("",):
679 540 pats = []
680 541 if not globbed and default == 'relpath':
681 542 pats = expandpats(pats or [])
682 543
683 544 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
684 545 default)
685 546 def badfn(f, msg):
686 547 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
687 548 m.bad = badfn
688 549 return m, pats
689 550
690 551 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
691 552 return matchandpats(ctx, pats, opts, globbed, default)[0]
692 553
693 554 def matchall(repo):
694 555 return matchmod.always(repo.root, repo.getcwd())
695 556
696 557 def matchfiles(repo, files):
697 558 return matchmod.exact(repo.root, repo.getcwd(), files)
698 559
699 560 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
700 561 if dry_run is None:
701 562 dry_run = opts.get('dry_run')
702 563 if similarity is None:
703 564 similarity = float(opts.get('similarity') or 0)
704 565 # we'd use status here, except handling of symlinks and ignore is tricky
705 566 m = match(repo[None], pats, opts)
706 567 rejected = []
707 568 m.bad = lambda x, y: rejected.append(x)
708 569
709 570 added, unknown, deleted, removed = _interestingfiles(repo, m)
710 571
711 572 unknownset = set(unknown)
712 573 toprint = unknownset.copy()
713 574 toprint.update(deleted)
714 575 for abs in sorted(toprint):
715 576 if repo.ui.verbose or not m.exact(abs):
716 577 rel = m.rel(abs)
717 578 if abs in unknownset:
718 579 status = _('adding %s\n') % ((pats and rel) or abs)
719 580 else:
720 581 status = _('removing %s\n') % ((pats and rel) or abs)
721 582 repo.ui.status(status)
722 583
723 584 renames = _findrenames(repo, m, added + unknown, removed + deleted,
724 585 similarity)
725 586
726 587 if not dry_run:
727 588 _markchanges(repo, unknown, deleted, renames)
728 589
729 590 for f in rejected:
730 591 if f in m.files():
731 592 return 1
732 593 return 0
733 594
734 595 def marktouched(repo, files, similarity=0.0):
735 596 '''Assert that files have somehow been operated upon. files are relative to
736 597 the repo root.'''
737 598 m = matchfiles(repo, files)
738 599 rejected = []
739 600 m.bad = lambda x, y: rejected.append(x)
740 601
741 602 added, unknown, deleted, removed = _interestingfiles(repo, m)
742 603
743 604 if repo.ui.verbose:
744 605 unknownset = set(unknown)
745 606 toprint = unknownset.copy()
746 607 toprint.update(deleted)
747 608 for abs in sorted(toprint):
748 609 if abs in unknownset:
749 610 status = _('adding %s\n') % abs
750 611 else:
751 612 status = _('removing %s\n') % abs
752 613 repo.ui.status(status)
753 614
754 615 renames = _findrenames(repo, m, added + unknown, removed + deleted,
755 616 similarity)
756 617
757 618 _markchanges(repo, unknown, deleted, renames)
758 619
759 620 for f in rejected:
760 621 if f in m.files():
761 622 return 1
762 623 return 0
763 624
764 625 def _interestingfiles(repo, matcher):
765 626 '''Walk dirstate with matcher, looking for files that addremove would care
766 627 about.
767 628
768 629 This is different from dirstate.status because it doesn't care about
769 630 whether files are modified or clean.'''
770 631 added, unknown, deleted, removed = [], [], [], []
771 audit_path = pathauditor(repo.root)
632 audit_path = pathutil.pathauditor(repo.root)
772 633
773 634 ctx = repo[None]
774 635 dirstate = repo.dirstate
775 636 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
776 637 full=False)
777 638 for abs, st in walkresults.iteritems():
778 639 dstate = dirstate[abs]
779 640 if dstate == '?' and audit_path.check(abs):
780 641 unknown.append(abs)
781 642 elif dstate != 'r' and not st:
782 643 deleted.append(abs)
783 644 # for finding renames
784 645 elif dstate == 'r':
785 646 removed.append(abs)
786 647 elif dstate == 'a':
787 648 added.append(abs)
788 649
789 650 return added, unknown, deleted, removed
790 651
791 652 def _findrenames(repo, matcher, added, removed, similarity):
792 653 '''Find renames from removed files to added ones.'''
793 654 renames = {}
794 655 if similarity > 0:
795 656 for old, new, score in similar.findrenames(repo, added, removed,
796 657 similarity):
797 658 if (repo.ui.verbose or not matcher.exact(old)
798 659 or not matcher.exact(new)):
799 660 repo.ui.status(_('recording removal of %s as rename to %s '
800 661 '(%d%% similar)\n') %
801 662 (matcher.rel(old), matcher.rel(new),
802 663 score * 100))
803 664 renames[new] = old
804 665 return renames
805 666
806 667 def _markchanges(repo, unknown, deleted, renames):
807 668 '''Marks the files in unknown as added, the files in deleted as removed,
808 669 and the files in renames as copied.'''
809 670 wctx = repo[None]
810 671 wlock = repo.wlock()
811 672 try:
812 673 wctx.forget(deleted)
813 674 wctx.add(unknown)
814 675 for new, old in renames.iteritems():
815 676 wctx.copy(old, new)
816 677 finally:
817 678 wlock.release()
818 679
819 680 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
820 681 """Update the dirstate to reflect the intent of copying src to dst. For
821 682 different reasons it might not end with dst being marked as copied from src.
822 683 """
823 684 origsrc = repo.dirstate.copied(src) or src
824 685 if dst == origsrc: # copying back a copy?
825 686 if repo.dirstate[dst] not in 'mn' and not dryrun:
826 687 repo.dirstate.normallookup(dst)
827 688 else:
828 689 if repo.dirstate[origsrc] == 'a' and origsrc == src:
829 690 if not ui.quiet:
830 691 ui.warn(_("%s has not been committed yet, so no copy "
831 692 "data will be stored for %s.\n")
832 693 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
833 694 if repo.dirstate[dst] in '?r' and not dryrun:
834 695 wctx.add([dst])
835 696 elif not dryrun:
836 697 wctx.copy(origsrc, dst)
837 698
838 699 def readrequires(opener, supported):
839 700 '''Reads and parses .hg/requires and checks if all entries found
840 701 are in the list of supported features.'''
841 702 requirements = set(opener.read("requires").splitlines())
842 703 missings = []
843 704 for r in requirements:
844 705 if r not in supported:
845 706 if not r or not r[0].isalnum():
846 707 raise error.RequirementError(_(".hg/requires file is corrupt"))
847 708 missings.append(r)
848 709 missings.sort()
849 710 if missings:
850 711 raise error.RequirementError(
851 712 _("unknown repository format: requires features '%s' (upgrade "
852 713 "Mercurial)") % "', '".join(missings))
853 714 return requirements
854 715
855 716 class filecacheentry(object):
856 717 def __init__(self, path, stat=True):
857 718 self.path = path
858 719 self.cachestat = None
859 720 self._cacheable = None
860 721
861 722 if stat:
862 723 self.cachestat = filecacheentry.stat(self.path)
863 724
864 725 if self.cachestat:
865 726 self._cacheable = self.cachestat.cacheable()
866 727 else:
867 728 # None means we don't know yet
868 729 self._cacheable = None
869 730
870 731 def refresh(self):
871 732 if self.cacheable():
872 733 self.cachestat = filecacheentry.stat(self.path)
873 734
874 735 def cacheable(self):
875 736 if self._cacheable is not None:
876 737 return self._cacheable
877 738
878 739 # we don't know yet, assume it is for now
879 740 return True
880 741
881 742 def changed(self):
882 743 # no point in going further if we can't cache it
883 744 if not self.cacheable():
884 745 return True
885 746
886 747 newstat = filecacheentry.stat(self.path)
887 748
888 749 # we may not know if it's cacheable yet, check again now
889 750 if newstat and self._cacheable is None:
890 751 self._cacheable = newstat.cacheable()
891 752
892 753 # check again
893 754 if not self._cacheable:
894 755 return True
895 756
896 757 if self.cachestat != newstat:
897 758 self.cachestat = newstat
898 759 return True
899 760 else:
900 761 return False
901 762
902 763 @staticmethod
903 764 def stat(path):
904 765 try:
905 766 return util.cachestat(path)
906 767 except OSError, e:
907 768 if e.errno != errno.ENOENT:
908 769 raise
909 770
910 771 class filecache(object):
911 772 '''A property like decorator that tracks a file under .hg/ for updates.
912 773
913 774 Records stat info when called in _filecache.
914 775
915 776 On subsequent calls, compares old stat info with new info, and recreates
916 777 the object when needed, updating the new stat info in _filecache.
917 778
918 779 Mercurial either atomically renames or appends to files under .hg,
919 780 so to ensure the cache is reliable we need the filesystem to be able
920 781 to tell us if a file has been replaced. If it can't, we fall back to
921 782 recreating the object on every call (essentially the same behaviour as
922 783 propertycache).'''
923 784 def __init__(self, path):
924 785 self.path = path
925 786
926 787 def join(self, obj, fname):
927 788 """Used to compute the runtime path of the cached file.
928 789
929 790 Users should subclass filecache and provide their own version of this
930 791 function to call the appropriate join function on 'obj' (an instance
931 792 of the class that its member function was decorated).
932 793 """
933 794 return obj.join(fname)
934 795
935 796 def __call__(self, func):
936 797 self.func = func
937 798 self.name = func.__name__
938 799 return self
939 800
940 801 def __get__(self, obj, type=None):
941 802 # do we need to check if the file changed?
942 803 if self.name in obj.__dict__:
943 804 assert self.name in obj._filecache, self.name
944 805 return obj.__dict__[self.name]
945 806
946 807 entry = obj._filecache.get(self.name)
947 808
948 809 if entry:
949 810 if entry.changed():
950 811 entry.obj = self.func(obj)
951 812 else:
952 813 path = self.join(obj, self.path)
953 814
954 815 # We stat -before- creating the object so our cache doesn't lie if
955 816 # a writer modifies the file between the time we read and stat it
956 817 entry = filecacheentry(path)
957 818 entry.obj = self.func(obj)
958 819
959 820 obj._filecache[self.name] = entry
960 821
961 822 obj.__dict__[self.name] = entry.obj
962 823 return entry.obj
963 824
964 825 def __set__(self, obj, value):
965 826 if self.name not in obj._filecache:
966 827 # we add an entry for the missing value because X in __dict__
967 828 # implies X in _filecache
968 829 ce = filecacheentry(self.join(obj, self.path), False)
969 830 obj._filecache[self.name] = ce
970 831 else:
971 832 ce = obj._filecache[self.name]
972 833
973 834 ce.obj = value # update cached copy
974 835 obj.__dict__[self.name] = value # update copy returned by obj.x
975 836
976 837 def __delete__(self, obj):
977 838 try:
978 839 del obj.__dict__[self.name]
979 840 except KeyError:
980 841 raise AttributeError(self.name)
981 842
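In practice filecache is subclassed so join() resolves against the owning object, per the docstring above. A minimal sketch of the pattern with hypothetical names:

    class repofilecache(filecache):
        # resolve the tracked file against the owning object
        def join(self, obj, fname):
            return obj.join(fname)

    class fakerepo(object):
        def __init__(self):
            self._filecache = {}          # bookkeeping dict the descriptor expects
        def join(self, fname):
            return '/repo/.hg/' + fname   # hypothetical location
        @repofilecache('bookmarks')
        def bookmarks(self):
            return 'parsed bookmarks'     # recomputed only when the file changes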
982 843 class dirs(object):
983 844 '''a multiset of directory names from a dirstate or manifest'''
984 845
985 846 def __init__(self, map, skip=None):
986 847 self._dirs = {}
987 848 addpath = self.addpath
988 849 if util.safehasattr(map, 'iteritems') and skip is not None:
989 850 for f, s in map.iteritems():
990 851 if s[0] != skip:
991 852 addpath(f)
992 853 else:
993 854 for f in map:
994 855 addpath(f)
995 856
996 857 def addpath(self, path):
997 858 dirs = self._dirs
998 859 for base in finddirs(path):
999 860 if base in dirs:
1000 861 dirs[base] += 1
1001 862 return
1002 863 dirs[base] = 1
1003 864
1004 865 def delpath(self, path):
1005 866 dirs = self._dirs
1006 867 for base in finddirs(path):
1007 868 if dirs[base] > 1:
1008 869 dirs[base] -= 1
1009 870 return
1010 871 del dirs[base]
1011 872
1012 873 def __iter__(self):
1013 874 return self._dirs.iterkeys()
1014 875
1015 876 def __contains__(self, d):
1016 877 return d in self._dirs
1017 878
1018 879 if util.safehasattr(parsers, 'dirs'):
1019 880 dirs = parsers.dirs
1020 881
1021 882 def finddirs(path):
1022 883 pos = path.rfind('/')
1023 884 while pos != -1:
1024 885 yield path[:pos]
1025 886 pos = path.rfind('/', 0, pos)
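finddirs yields every ancestor directory of a slash-separated path, and the pure-Python dirs above counts how many paths reference each one (the C version from parsers behaves the same). A short illustration:

    list(finddirs('a/b/c'))   # -> ['a/b', 'a']

    d = dirs(['a/b/c', 'a/d'])
    'a' in d       # True - an ancestor of both paths
    'a/b' in d     # True
    'a/b/c' in d   # False - only parent directories are counted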
@@ -1,1469 +1,1470 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, shutil, posixpath, sys
9 9 import xml.dom.minidom
10 10 import stat, subprocess, tarfile
11 11 from i18n import _
12 import config, scmutil, util, node, error, cmdutil, bookmarks, match as matchmod
12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
13 import pathutil
13 14 hg = None
14 15 propertycache = util.propertycache
15 16
16 17 nullstate = ('', '', 'empty')
17 18
18 19 def _expandedabspath(path):
19 20 '''
20 21 take a path or URL; if it is a path, expand it and return an absolute path
21 22 '''
22 23 expandedpath = util.urllocalpath(util.expandpath(path))
23 24 u = util.url(expandedpath)
24 25 if not u.scheme:
25 26 path = util.normpath(os.path.abspath(u.path))
26 27 return path
27 28
28 29 def _getstorehashcachename(remotepath):
29 30 '''get a unique filename for the store hash cache of a remote repository'''
30 31 return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
31 32
32 33 def _calcfilehash(filename):
33 34 data = ''
34 35 if os.path.exists(filename):
35 36 fd = open(filename, 'rb')
36 37 data = fd.read()
37 38 fd.close()
38 39 return util.sha1(data).hexdigest()
39 40
40 41 class SubrepoAbort(error.Abort):
41 42 """Exception class used to avoid handling a subrepo error more than once"""
42 43 def __init__(self, *args, **kw):
43 44 error.Abort.__init__(self, *args, **kw)
44 45 self.subrepo = kw.get('subrepo')
45 46 self.cause = kw.get('cause')
46 47
47 48 def annotatesubrepoerror(func):
48 49 def decoratedmethod(self, *args, **kargs):
49 50 try:
50 51 res = func(self, *args, **kargs)
51 52 except SubrepoAbort, ex:
52 53 # This exception has already been handled
53 54 raise ex
54 55 except error.Abort, ex:
55 56 subrepo = subrelpath(self)
56 57 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
57 58 # avoid handling this exception by raising a SubrepoAbort exception
58 59 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
59 60 cause=sys.exc_info())
60 61 return res
61 62 return decoratedmethod
62 63
63 64 def state(ctx, ui):
64 65 """return a state dict, mapping subrepo paths configured in .hgsub
65 66 to tuple: (source from .hgsub, revision from .hgsubstate, kind
66 67 (key in types dict))
67 68 """
68 69 p = config.config()
69 70 def read(f, sections=None, remap=None):
70 71 if f in ctx:
71 72 try:
72 73 data = ctx[f].data()
73 74 except IOError, err:
74 75 if err.errno != errno.ENOENT:
75 76 raise
76 77 # handle missing subrepo spec files as removed
77 78 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
78 79 return
79 80 p.parse(f, data, sections, remap, read)
80 81 else:
81 82 raise util.Abort(_("subrepo spec file %s not found") % f)
82 83
83 84 if '.hgsub' in ctx:
84 85 read('.hgsub')
85 86
86 87 for path, src in ui.configitems('subpaths'):
87 88 p.set('subpaths', path, src, ui.configsource('subpaths', path))
88 89
89 90 rev = {}
90 91 if '.hgsubstate' in ctx:
91 92 try:
92 93 for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
93 94 l = l.lstrip()
94 95 if not l:
95 96 continue
96 97 try:
97 98 revision, path = l.split(" ", 1)
98 99 except ValueError:
99 100 raise util.Abort(_("invalid subrepository revision "
100 101 "specifier in .hgsubstate line %d")
101 102 % (i + 1))
102 103 rev[path] = revision
103 104 except IOError, err:
104 105 if err.errno != errno.ENOENT:
105 106 raise
106 107
107 108 def remap(src):
108 109 for pattern, repl in p.items('subpaths'):
109 110 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
110 111 # does a string decode.
111 112 repl = repl.encode('string-escape')
112 113 # However, we still want to allow back references to go
113 114 # through unharmed, so we turn r'\\1' into r'\1'. Again,
114 115 # extra escapes are needed because re.sub string decodes.
115 116 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
116 117 try:
117 118 src = re.sub(pattern, repl, src, 1)
118 119 except re.error, e:
119 120 raise util.Abort(_("bad subrepository pattern in %s: %s")
120 121 % (p.source('subpaths', pattern), e))
121 122 return src
122 123
123 124 state = {}
124 125 for path, src in p[''].items():
125 126 kind = 'hg'
126 127 if src.startswith('['):
127 128 if ']' not in src:
128 129 raise util.Abort(_('missing ] in subrepo source'))
129 130 kind, src = src.split(']', 1)
130 131 kind = kind[1:]
131 132 src = src.lstrip() # strip any extra whitespace after ']'
132 133
133 134 if not util.url(src).isabs():
134 135 parent = _abssource(ctx._repo, abort=False)
135 136 if parent:
136 137 parent = util.url(parent)
137 138 parent.path = posixpath.join(parent.path or '', src)
138 139 parent.path = posixpath.normpath(parent.path)
139 140 joined = str(parent)
140 141 # Remap the full joined path and use it if it changes,
141 142 # else remap the original source.
142 143 remapped = remap(joined)
143 144 if remapped == joined:
144 145 src = remap(src)
145 146 else:
146 147 src = remapped
147 148
148 149 src = remap(src)
149 150 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
150 151
151 152 return state
152 153
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
    repo.wwrite('.hgsubstate', ''.join(lines), '')

def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

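    # For each local state l, compare it with the remote state r and the
    # ancestor state a: identical states are kept, one-sided changes are
    # taken automatically, and true divergence falls through to a prompt.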
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed")
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, l[1][:12], r[1][:12]), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm

def _updateprompt(ui, sub, dirty, local, remote):
    if dirty:
        msg = (_(' subrepository sources for %s differ\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                 '$$ &Local $$ &Remote')
               % (subrelpath(sub), local, remote))
    else:
        msg = (_(' subrepository sources for %s differ (in checked out '
                 'version)\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                 '$$ &Local $$ &Remote')
               % (subrelpath(sub), local, remote))
    return ui.promptchoice(msg, 0)

def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    parent = repo
    while util.safehasattr(parent, '_subparent'):
        parent = parent._subparent
    p = parent.root.rstrip(os.sep)
    return repo.root[len(p) + 1:]

def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if not util.safehasattr(sub, '_repo'):
        return sub._path
    return reporelpath(sub._repo)

def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.sharedpath != repo.path:
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise util.Abort(_("default path for subrepository not found"))

def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])

# subrepo classes need to implement the following abstract class:

class abstractsubrepo(object):

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        return []

    def status(self, rev2, **opts):
        return [], [], [], [], [], [], []

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix, match=None):
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
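            # map the 'x' flag to an executable file mode (octal literals
            # in Python 2 syntax)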
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        return ([], [])

    def revert(self, ui, substate, *pats, **opts):
        ui.warn('%s: reverting %s subrepos is unsupported\n' \
                % (substate[0], substate[2]))
        return []

class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.baseui, root, create=create)
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self._repo.ui.setconfig(s, k, v)
        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        clean = True
        lock = self._repo.lock()
        itercache = self._calcstorehash(path)
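        # compare cached hash lines against freshly computed ones; any
        # mismatch in content or line count means the store changed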
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        lock.release()
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to detect when there are changes that may
        require a push to a given remote path.'''
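        # The yielded lines form the cache body: a '# <remotepath>' header
        # followed by one '<relname> = <filehash>' line per store file.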
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        for relname in filelist:
            absname = os.path.normpath(self._repo.join(relname))
            yield '%s = %s\n' % (relname, _calcfilehash(absname))

    def _getstorehashcachepath(self, remotepath):
        '''get a unique path for the store hash cache'''
        return self._repo.join(os.path.join(
            'cache', 'storehash', _getstorehashcachename(remotepath)))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = self._getstorehashcachepath(remotepath)
        if not os.path.exists(cachefile):
            return ''
        fd = open(cachefile, 'r')
        pullstate = fd.readlines()
        fd.close()
        return pullstate

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = self._getstorehashcachepath(remotepath)
        lock = self._repo.lock()
        storehash = list(self._calcstorehash(remotepath))
        cachedir = os.path.dirname(cachefile)
        if not os.path.exists(cachedir):
            util.makedirs(cachedir, notindexed=True)
        fd = open(cachefile, 'w')
        fd.writelines(storehash)
        fd.close()
        lock.release()

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            fp = self._repo.opener("hgrc", "w", text=True)
            fp.write('[paths]\n')

            def addpathconfig(key, value):
                if value:
                    fp.write('%s = %s\n' % (key, value))
                    self._repo.ui.setconfig('paths', key, value)

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))
            return [], [], [], [], [], [], []

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, ui, archiver, prefix, match=None):
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                ui, archiver, os.path.join(prefix, self._path), submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        source, revision, kind = state
        if revision not in self._repo:
            self._repo._subsource = source
            srcurl = _abssource(self._repo)
            other = hg.peer(self._repo, {}, srcurl)
            if len(self._repo) == 0:
                self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                     % (subrelpath(self), srcurl))
                parentrepo = self._repo._subparent
                shutil.rmtree(self._repo.path)
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         other, self._repo.root,
                                         update=False)
                self._repo = cloned.local()
                self._initrepo(parentrepo, source, create=True)
                self._cachestorehash(srcurl)
            else:
                self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                     % (subrelpath(self), srcurl))
                cleansub = self.storeclean(srcurl)
                remotebookmarks = other.listkeys('bookmarks')
                self._repo.pull(other)
                bookmarks.updatefromremote(self._repo.ui, self._repo,
                                           remotebookmarks, srcurl)
                if cleansub:
                    # keep the repo clean after pull
                    self._cachestorehash(srcurl)

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        self._get(state)
        source, revision, kind = state
        self._repo.ui.debug("getting subrepo %s\n" % self._path)
        hg.updaterepo(self._repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = self._repo.push(other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, ui, match, prefix):
        return cmdutil.forget(ui, self._repo, match,
                              os.path.join(prefix, self._path), True)

    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)

    def filerevert(self, ui, *pats, **opts):
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)

class svnsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # The last committed rev is not the same as rev. We would
            # like to take lastrev, but we do not know if the subrepo
            # URL exists at lastrev. Test it and fall back to rev if
            # it is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes our definition of "changed" differs from
                # svn's. For instance, svn ignores missing files when
                # committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        return self._svncommand(['cat'], name)[0]


class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._state = state
        self._ctx = ctx
        self._path = path
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        self._ui = ctx._repo.ui
        self._ensuregit()

    def _ensuregit(self):
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError, e:
            if e.errno != 2 or os.name != 'nt':
                raise
            self._gitexecutable = 'git.cmd'
            out, err = self._gitnodir(['--version'])
        m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
        if not m:
            self._ui.warn(_('cannot retrieve git version'))
            return
        version = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment. For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        if version < (1, 5, 0):
            raise util.Abort(_('git subrepo requires git 1.6.0 or later'))
        elif version < (1, 6, 0):
            self._ui.warn(_('git subrepo requires git 1.6.0 or later'))

    def _gitcommand(self, commands, env=None, stream=False):
        return self._gitdir(commands, env=env, stream=stream)[0]

    def _gitdir(self, commands, env=None, stream=False):
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)

    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The method tries to call the git command. Versions prior to 1.6.0
        are not supported and will very probably fail.
1079 1080 """
1080 1081 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1081 1082 # unless ui.quiet is set, print git's stderr,
1082 1083 # which is mostly progress and useful info
1083 1084 errpipe = None
1084 1085 if self._ui.quiet:
1085 1086 errpipe = open(os.devnull, 'w')
1086 1087 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1087 1088 cwd=cwd, env=env, close_fds=util.closefds,
1088 1089 stdout=subprocess.PIPE, stderr=errpipe)
1089 1090 if stream:
1090 1091 return p.stdout, None
1091 1092
1092 1093 retdata = p.stdout.read().strip()
1093 1094 # wait for the child to exit to avoid race condition.
1094 1095 p.wait()
1095 1096
1096 1097 if p.returncode != 0 and p.returncode != 1:
1097 1098 # there are certain error codes that are ok
1098 1099 command = commands[0]
1099 1100 if command in ('cat-file', 'symbolic-ref'):
1100 1101 return retdata, p.returncode
1101 1102 # for all others, abort
1102 1103 raise util.Abort('git %s error %d in %s' %
1103 1104 (command, p.returncode, self._relpath))
1104 1105
1105 1106 return retdata, p.returncode
1106 1107
1107 1108 def _gitmissing(self):
1108 1109 return not os.path.exists(os.path.join(self._abspath, '.git'))
1109 1110
1110 1111 def _gitstate(self):
1111 1112 return self._gitcommand(['rev-parse', 'HEAD'])
1112 1113
1113 1114 def _gitcurrentbranch(self):
1114 1115 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1115 1116 if err:
1116 1117 current = None
1117 1118 return current
1118 1119
1119 1120 def _gitremote(self, remote):
1120 1121 out = self._gitcommand(['remote', 'show', '-n', remote])
1121 1122 line = out.split('\n')[1]
1122 1123 i = line.index('URL: ') + len('URL: ')
1123 1124 return line[i:]
1124 1125
1125 1126 def _githavelocally(self, revision):
1126 1127 out, code = self._gitdir(['cat-file', '-e', revision])
1127 1128 return code == 0
1128 1129
1129 1130 def _gitisancestor(self, r1, r2):
1130 1131 base = self._gitcommand(['merge-base', r1, r2])
1131 1132 return base == r1
1132 1133
1133 1134 def _gitisbare(self):
1134 1135 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1135 1136
1136 1137 def _gitupdatestat(self):
1137 1138 """This must be run before git diff-index.
1138 1139 diff-index only looks at changes to file stat;
1139 1140 this command looks at file contents and updates the stat."""
1140 1141 self._gitcommand(['update-index', '-q', '--refresh'])
1141 1142
1142 1143 def _gitbranchmap(self):
1143 1144 '''returns 2 things:
1144 1145 a map from git branch to revision
1145 1146 a map from revision to branches'''
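        # 'git for-each-ref' yields '<sha> <refname>' lines; only local
        # heads and remote-tracking refs (minus remote HEAD aliases) are
        # recorded below.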
        branch2rev = {}
        rev2branch = {}

        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch

    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking

    def _abssource(self, source):
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)

    def _fetch(self, source, revision):
        if self._gitmissing():
            source = self._abssource(source)
            self._ui.status(_('cloning subrepo %s from %s\n') %
                            (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self._ui.status(_('pulling subrepo %s from %s\n') %
                        (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                             (revision, self._relpath))

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1

    def basestate(self):
        return self._gitstate()

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                          self._relpath)
            self._ui.warn(_('check out a git branch if you intend '
                            'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()

    @annotatesubrepoerror
    def commit(self, text, user, date):
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()

    @annotatesubrepoerror
    def merge(self, state):
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])

        if self.dirty():
            if self._gitstate() != revision:
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self._ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')

        if not self._state[1]:
            return True
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self._ui.warn(_('unrelated git branch checked out '
                                'in subrepo %s\n') % self._relpath)
                return False
            self._ui.status(_('pushing branch %s of subrepo %s\n') %
                            (current.split('/', 2)[2], self._relpath))
            self._gitcommand(cmd + ['origin', current])
            return True
        else:
            self._ui.warn(_('no branch checked out in subrepo %s\n'
                            'cannot push revision %s\n') %
                          (self._relpath, self._state[1]))
            return False

    @annotatesubrepoerror
    def remove(self):
        if self._gitmissing():
            return
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._relpath)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self._ui.note(_('removing subrepo %s\n') % self._relpath)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f in os.listdir(self._abspath):
            if f == '.git':
                continue
            path = os.path.join(self._abspath, f)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

    def archive(self, ui, archiver, prefix, match=None):
        total = 0
        source, revision = self._state
        if not revision:
            return total
        self._fetch(source, revision)

        # Run git's native archive command and parse the tar stream it
        # produces. This should be much faster than manually traversing
        # the trees and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode='r|')
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
        for i, info in enumerate(tar):
            if info.isdir():
                continue
            if match and not match(info.name):
                continue
            if info.issym():
                data = info.linkname
            else:
                data = tar.extractfile(info).read()
            archiver.addfile(os.path.join(prefix, self._path, info.name),
                             info.mode, info.issym(), data)
            total += 1
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'))
        ui.progress(_('archiving (%s)') % relpath, None)
        return total


    @annotatesubrepoerror
    def status(self, rev2, **opts):
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return [], [], [], [], [], [], []
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', rev1, rev2]
        else:
            command = ['diff-index', rev1]
        out = self._gitcommand(command)
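        # each output line is '<meta>\t<path>'; the status letter
        # (M/A/D) immediately precedes the tab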
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted = unknown = ignored = clean = []
        return modified, added, removed, deleted, unknown, ignored, clean

types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
}