addchangegroup: use a postrelease callback to call changegroup hook...
Pierre-Yves David
r15584:9df9444e default
@@ -1,2157 +1,2160 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
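        # Illustrative sketch (not part of this changeset): a property
        # declared as
        #
        #   @filecache('bookmarks')
        #   def _bookmarks(self):
        #       return bookmarks.read(self)
        #
        # is only recomputed when the stat data of .hg/bookmarks changes;
        # the decorator records a util.filecacheentry for it in this dict
        # under the property name.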

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [0] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
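    # Illustrative sketch (not part of this changeset) of the indexing
    # protocol above:
    #
    #   repo[None]   -> workingctx for the working directory
    #   repo['tip']  -> changectx for the tip changeset
    #   repo[0]      -> changectx for revision 0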

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
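    # Illustrative sketch (not part of this changeset): both helpers take
    # printf-style placeholders that revset.formatspec quotes safely, e.g.:
    #
    #   headrevs = repo.revs('heads(branch(%s))', 'default')
    #   for ctx in repo.set('%d::%d', 0, 5):
    #       print ctx.hex()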

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
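    # Illustrative sketch (not part of this changeset): tag the tip with
    # a committed (non-local) tag; the name and user are hypothetical:
    #
    #   repo.tag('v1.0', repo['tip'].node(), 'Added tag v1.0',
    #            False, 'user <user@example.com>', None)
    #
    # With local=True the tag would be written to .hg/localtags instead
    # of committing a change to .hgtags.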

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
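    # Illustrative sketch (not part of this changeset) of the two views:
    #
    #   repo.branchmap()  -> {'default': [head1, head2], ...}  all heads
    #   repo.branchtags() -> {'default': tiphead, ...}         one head per
    #                        branch, preferring open heads over closed ones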

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
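    # Illustrative sketch (not part of this changeset): lookup() tries,
    # in order, integer revisions, the special names '.', 'null' and
    # 'tip', full node ids, bookmarks, tags, branch names, and finally
    # unambiguous node id prefixes:
    #
    #   node = repo.lookup('.')         # first parent of the working dir
    #   node = repo.lookup('9df9444e')  # a short hash prefix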

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
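    # Illustrative sketch (not part of this changeset): an extension can
    # register a named filter and reference it from the [encode]/[decode]
    # hgrc sections; the filter name and pattern here are hypothetical:
    #
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upper)
    #
    #   # hgrc:
    #   # [encode]
    #   # **.txt = upper: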

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
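    # Illustrative sketch (not part of this changeset): the usual calling
    # pattern, with the store lock already held:
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... append revlog data ...
    #       tr.close()    # commit the transaction
    #   finally:
    #       tr.release()  # rolls back unless close() was called
    #
    # Nested calls return tr.nest(), so only the outermost transaction
    # actually commits or rolls back.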

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0

    def invalidatecaches(self):
        try:
            delattr(self, '_tagscache')
        except AttributeError:
            pass

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        try:
            delattr(self, 'dirstate')
        except AttributeError:
            pass

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _postrelease(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        assert l is not None
        assert l.held
        l.postreleasehooks.append(callback)
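    # Illustrative sketch (not part of this file, but the pattern this
    # changeset introduces in addchangegroup): defer the changegroup hook
    # until the lock drops, so hooks observe a fully committed repository.
    # node, srctype and url stand in for the values addchangegroup has in
    # scope:
    #
    #   def runhooks():
    #       self.hook("changegroup", node=hex(node), source=srctype,
    #                 url=url)
    #   self._postrelease(runhooks)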

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
946
946
947 def wlock(self, wait=True):
947 def wlock(self, wait=True):
948 '''Lock the non-store parts of the repository (everything under
948 '''Lock the non-store parts of the repository (everything under
949 .hg except .hg/store) and return a weak reference to the lock.
949 .hg except .hg/store) and return a weak reference to the lock.
950 Use this before modifying files in .hg.'''
950 Use this before modifying files in .hg.'''
951 l = self._wlockref and self._wlockref()
951 l = self._wlockref and self._wlockref()
952 if l is not None and l.held:
952 if l is not None and l.held:
953 l.lock()
953 l.lock()
954 return l
954 return l
955
955
956 def unlock():
956 def unlock():
957 self.dirstate.write()
957 self.dirstate.write()
958 ce = self._filecache.get('dirstate')
958 ce = self._filecache.get('dirstate')
959 if ce:
959 if ce:
960 ce.refresh()
960 ce.refresh()
961
961
962 l = self._lock(self.join("wlock"), wait, unlock,
962 l = self._lock(self.join("wlock"), wait, unlock,
963 self.invalidatedirstate, _('working directory of %s') %
963 self.invalidatedirstate, _('working directory of %s') %
964 self.origroot)
964 self.origroot)
965 self._wlockref = weakref.ref(l)
965 self._wlockref = weakref.ref(l)
966 return l
966 return l
967
967
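    # Illustrative sketch: the usual caller-side discipline for these locks.
    # Acquire wlock before lock when both are needed, and release in a
    # finally block so the unlock callbacks above (store/dirstate writeback)
    # always run; release() is the helper from mercurial's lock module.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)
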
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

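    # Illustrative sketch: driving commit() programmatically. It returns the
    # new changeset node, or None when there is nothing to commit. The path
    # and message below are made up for the example.
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='fix frobnication', user='me <me@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
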
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # ensure the new commit is 1-phase
            phases.retractboundary(self, 1, [n])
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

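    # Illustrative sketch: status() returns a 7-tuple of sorted file lists,
    # in the index order used by the changes[n] accesses in commit() above.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)
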
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

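    # Illustrative sketch: listing the open heads of a named branch, newest
    # first, using short() from mercurial.node; pass closed=True to include
    # closed heads as well.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))
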
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
            phases.advanceboundary(self, 0, common)
        finally:
            lock.release()

        return result

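    # Illustrative sketch: pulling from a peer repository. The URL is made
    # up, and hg.repository() is one way to obtain a remote repo object in
    # this codebase; pull() returns 0 when there was nothing to fetch.
    #
    #   from mercurial import hg
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   result = repo.pull(other)
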
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and the remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                          revs, newbranch)
                ret = remote_heads
                if cg is not None:
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remote_heads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remote_heads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url(),
                                                    lock=lock)
                # if we don't push, the common data is already useful
                # everything exchanged is public for now
                phases.advanceboundary(self, 0, fut)
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

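    # Illustrative sketch: interpreting push()'s return value per the
    # contract documented above; 'other' is a peer obtained as in the
    # pull() example.
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       repo.ui.warn('push failed or nothing to push\n')
    #   elif ret == 1:
    #       repo.ui.status('pushed; remote head count unchanged\n')
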
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

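    # Illustrative sketch: with heads=None and common=None this produces a
    # full bundle rooted at nullid, per the docstring above; the returned
    # object (or None when nothing is missing) streams changegroup chunks.
    #
    #   cg = repo.getbundle('serve')
    #   if cg is not None:
    #       chunk = cg.read(4096)   # read successive changegroup data
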
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
1766
1766
1767 def _changegroup(self, nodes, source):
1767 def _changegroup(self, nodes, source):
1768 """Compute the changegroup of all nodes that we have that a recipient
1768 """Compute the changegroup of all nodes that we have that a recipient
1769 doesn't. Return a chunkbuffer object whose read() method will return
1769 doesn't. Return a chunkbuffer object whose read() method will return
1770 successive changegroup chunks.
1770 successive changegroup chunks.
1771
1771
1772 This is much easier than the previous function as we can assume that
1772 This is much easier than the previous function as we can assume that
1773 the recipient has any changenode we aren't sending them.
1773 the recipient has any changenode we aren't sending them.
1774
1774
1775 nodes is the set of nodes to send"""
1775 nodes is the set of nodes to send"""
1776
1776
1777 cl = self.changelog
1777 cl = self.changelog
1778 mf = self.manifest
1778 mf = self.manifest
1779 mfs = {}
1779 mfs = {}
1780 changedfiles = set()
1780 changedfiles = set()
1781 fstate = ['']
1781 fstate = ['']
1782 count = [0]
1782 count = [0]
1783
1783
1784 self.hook('preoutgoing', throw=True, source=source)
1784 self.hook('preoutgoing', throw=True, source=source)
1785 self.changegroupinfo(nodes, source)
1785 self.changegroupinfo(nodes, source)
1786
1786
1787 revset = set([cl.rev(n) for n in nodes])
1787 revset = set([cl.rev(n) for n in nodes])
1788
1788
1789 def gennodelst(log):
1789 def gennodelst(log):
1790 return [log.node(r) for r in log if log.linkrev(r) in revset]
1790 return [log.node(r) for r in log if log.linkrev(r) in revset]
1791
1791
1792 def lookup(revlog, x):
1792 def lookup(revlog, x):
1793 if revlog == cl:
1793 if revlog == cl:
1794 c = cl.read(x)
1794 c = cl.read(x)
1795 changedfiles.update(c[3])
1795 changedfiles.update(c[3])
1796 mfs.setdefault(c[0], x)
1796 mfs.setdefault(c[0], x)
1797 count[0] += 1
1797 count[0] += 1
1798 self.ui.progress(_('bundling'), count[0],
1798 self.ui.progress(_('bundling'), count[0],
1799 unit=_('changesets'), total=len(nodes))
1799 unit=_('changesets'), total=len(nodes))
1800 return x
1800 return x
1801 elif revlog == mf:
1801 elif revlog == mf:
1802 count[0] += 1
1802 count[0] += 1
1803 self.ui.progress(_('bundling'), count[0],
1803 self.ui.progress(_('bundling'), count[0],
1804 unit=_('manifests'), total=len(mfs))
1804 unit=_('manifests'), total=len(mfs))
1805 return cl.node(revlog.linkrev(revlog.rev(x)))
1805 return cl.node(revlog.linkrev(revlog.rev(x)))
1806 else:
1806 else:
1807 self.ui.progress(
1807 self.ui.progress(
1808 _('bundling'), count[0], item=fstate[0],
1808 _('bundling'), count[0], item=fstate[0],
1809 total=len(changedfiles), unit=_('files'))
1809 total=len(changedfiles), unit=_('files'))
1810 return cl.node(revlog.linkrev(revlog.rev(x)))
1810 return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
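        # The knob above comes from user configuration; a minimal sketch of
        # forcing reordering off (any value other than 'auto' goes through
        # util.parsebool, so the usual boolean spellings work):
        #
        #   [bundle]
        #   reorder = no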

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
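        # delayupdate() buffers changelog writes in temporary files; they
        # are exposed to hooks via writepending() (see pretxnchangegroup
        # below) and only made permanent by cl.finalize(trp) right before
        # the transaction closes.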
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr
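            # prog is a small stateful callback: the unbundler calls pr()
            # once per chunk received, and step/count/total are rebound
            # below as the stream moves from changesets to manifests to
            # files.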

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)
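            # This check is off by default; a minimal sketch of enabling it
            # in a server's hgrc (the setting read by configbool above):
            #
            #   [server]
            #   validate = True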

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh
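            # Example: two heads before, four after, where one of the new
            # heads carries a 'close' marker in extra(); then
            # dh = (4 - 2) - 1 = 1 and the status line reports "(+1 heads)".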

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)
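                # writepending() dumps the delayed changelog into a pending
                # file and returns True if anything was written; handing it
                # over lazily as 'pending' lets the hook machinery expose
                # the incoming changesets to the hook before the transaction
                # commits.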

            added = [cl.node(r) for r in xrange(clstart, clend)]
            if srctype != 'strip':
                phases.advanceboundary(self, 0, added)
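            # advanceboundary moves the added changesets to phase 0
            # (public); 'strip' is exempted, presumably so that stripped
            # and re-applied changesets are not forced public.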
            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            def postaddchangegroup():
                if changesets > 0:
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
            self._postrelease(postaddchangegroup)

        finally:
            tr.release()
            if lock:
                lock.release()
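        # postaddchangegroup is registered rather than called directly:
        # _postrelease queues it to run once the repository lock is finally
        # released, so changegroup/incoming hooks observe a committed,
        # unlocked repo (the "postrelease callback" this changeset's
        # summary refers to).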
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))
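            # Wire format consumed above, as the parsing shows:
            #   <status int>\n
            #   <total_files> <total_bytes>\n
            #   then per file: <name>\0<size>\n followed by <size> raw bytes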

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
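        # Decision summary, restating the branches above: a streaming clone
        # is attempted only for full clones (no explicit heads); old servers
        # advertise 'stream' (plain revlogv1), newer ones advertise
        # 'streamreqs' with their format requirements, and we stream only if
        # every remote requirement is one we support. Anything else falls
        # back to a regular pull.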

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
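    # Hypothetical usage sketch (not from this file): a bookmark push
    # arrives here roughly as
    #   repo.pushkey('bookmarks', 'feature-x', '', newhexnode)
    # to create a bookmark, or with both old and new hex nodes to move it.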

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
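    # The saved text lands in .hg/last-message.txt (self.opener roots at
    # .hg); the return value is the working-directory-relative path,
    # suitable for telling the user where the message can be found again,
    # e.g. after an aborted commit.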

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
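# The returned closure captures only a plain list of (src, dest) name
# pairs, never the repo or transaction object, so a transaction can hold
# it as its after-close callback without creating a reference cycle that
# would keep destructors from running.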

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True