r15706:ebaefd8c (default)
Pierre-Yves David
Add a phases.new-commit option to control minimal phase of new commit...
@@ -1,2212 +1,2219 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

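# A note on the two cache decorators aliased above: propertycache computes
# an attribute once per repository object, while filecache additionally
# ties the cached value to the stat of the backing file under .hg/ so it
# can be invalidated when that file changes (tracked via the _filecache
# map set up in localrepository.__init__ below).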
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

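        # The .hg/requires file written above is a plain list of feature
        # names, one per line, for example:
        #
        #   revlogv1
        #   store
        #   fncache
        #   dotencode
        #
        # Readers refuse to open a repository whose requires file lists a
        # feature missing from self.supported (see scmutil.readrequires).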

        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

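    # Example: with a subrepository mounted at sub/ in the working copy,
    # _checknested(root + '/sub') returns True, while a path such as
    # root + '/other' pops components until parts is empty and yields
    # False. Paths inside a subrepo are delegated to that subrepo's own
    # checknested().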
    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [0] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

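    # _phaserev maps each revision to its phase by marking every tracked
    # phase root and all of its descendants. The phases.new-commit option
    # this changeset introduces is consumed by the commit path further
    # down in this file (outside the excerpt shown here); as an
    # illustrative hgrc sketch, assuming the phase names defined in
    # phases.py:
    #
    #   [phases]
    #   new-commit = draft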
    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

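    # Sketch of the revset helpers (argument expansion is handled by
    # revset.formatspec, so callers never splice strings themselves):
    #
    #   repo.revs('%d:%d', 0, 5)     # -> [0, 1, 2, 3, 4, 5]
    #   for ctx in repo.set('head()'):
    #       ...                      # a changectx for every head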
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

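    # Both .hg/localtags and .hgtags use one '<hex node> <tag name>' entry
    # per line; when a tag already exists, writetags first re-records its
    # old node so that the newer entry supersedes it when the file is read.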
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

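    # branchmap() exposes every head of every branch, while branchtags()
    # reduces each branch to a single tip, preferring open heads, e.g.:
    #
    #   branchmap()  -> {'default': [n1, n2], 'stable': [n3]}
    #   branchtags() -> {'default': n2, 'stable': n3}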
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

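    # .hg/cache/branchheads is line oriented:
    #
    #   <tip hex> <tip rev>
    #   <head hex> <branch label>
    #   ...
    #
    # Any inconsistency (tip mismatch, parse error) simply discards the
    # cache; it is then rebuilt incrementally by _updatebranchcache.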
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

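    # lookup() resolution order: integer revision, the special names '.',
    # 'null' and 'tip', full hex node, bookmark, tag, branch, then an
    # unambiguous node prefix; repo.lookup(0) and repo.lookup('tip') both
    # return a binary node.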
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

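    # Sketch of the conventional calling pattern (illustrative, not a
    # verbatim caller from this file):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('example')
    #       try:
    #           ...          # write to the store
    #           tr.close()   # commit the transaction
    #       finally:
    #           tr.release() # rolls back if close() was never reached
    #   finally:
    #       lock.release()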
    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

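    # On successful close, aftertrans (defined at module level, outside
    # the excerpt shown here) renames each journal.* file to its undo.*
    # counterpart; those undo.* files are what _rollback() below consumes,
    # while recover() replays an abandoned journal directly.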
774 def recover(self):
774 def recover(self):
775 lock = self.lock()
775 lock = self.lock()
776 try:
776 try:
777 if os.path.exists(self.sjoin("journal")):
777 if os.path.exists(self.sjoin("journal")):
778 self.ui.status(_("rolling back interrupted transaction\n"))
778 self.ui.status(_("rolling back interrupted transaction\n"))
779 transaction.rollback(self.sopener, self.sjoin("journal"),
779 transaction.rollback(self.sopener, self.sjoin("journal"),
780 self.ui.warn)
780 self.ui.warn)
781 self.invalidate()
781 self.invalidate()
782 return True
782 return True
783 else:
783 else:
784 self.ui.warn(_("no interrupted transaction available\n"))
784 self.ui.warn(_("no interrupted transaction available\n"))
785 return False
785 return False
786 finally:
786 finally:
787 lock.release()
787 lock.release()
788
788
789 def rollback(self, dryrun=False, force=False):
789 def rollback(self, dryrun=False, force=False):
790 wlock = lock = None
790 wlock = lock = None
791 try:
791 try:
792 wlock = self.wlock()
792 wlock = self.wlock()
793 lock = self.lock()
793 lock = self.lock()
794 if os.path.exists(self.sjoin("undo")):
794 if os.path.exists(self.sjoin("undo")):
795 return self._rollback(dryrun, force)
795 return self._rollback(dryrun, force)
796 else:
796 else:
797 self.ui.warn(_("no rollback information available\n"))
797 self.ui.warn(_("no rollback information available\n"))
798 return 1
798 return 1
799 finally:
799 finally:
800 release(lock, wlock)
800 release(lock, wlock)
801
801
802 def _rollback(self, dryrun, force):
802 def _rollback(self, dryrun, force):
803 ui = self.ui
803 ui = self.ui
804 try:
804 try:
805 args = self.opener.read('undo.desc').splitlines()
805 args = self.opener.read('undo.desc').splitlines()
806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 if len(args) >= 3:
807 if len(args) >= 3:
808 detail = args[2]
808 detail = args[2]
809 oldtip = oldlen - 1
809 oldtip = oldlen - 1
810
810
811 if detail and ui.verbose:
811 if detail and ui.verbose:
812 msg = (_('repository tip rolled back to revision %s'
812 msg = (_('repository tip rolled back to revision %s'
813 ' (undo %s: %s)\n')
813 ' (undo %s: %s)\n')
814 % (oldtip, desc, detail))
814 % (oldtip, desc, detail))
815 else:
815 else:
816 msg = (_('repository tip rolled back to revision %s'
816 msg = (_('repository tip rolled back to revision %s'
817 ' (undo %s)\n')
817 ' (undo %s)\n')
818 % (oldtip, desc))
818 % (oldtip, desc))
819 except IOError:
819 except IOError:
820 msg = _('rolling back unknown transaction\n')
820 msg = _('rolling back unknown transaction\n')
821 desc = None
821 desc = None
822
822
823 if not force and self['.'] != self['tip'] and desc == 'commit':
823 if not force and self['.'] != self['tip'] and desc == 'commit':
824 raise util.Abort(
824 raise util.Abort(
825 _('rollback of last commit while not checked out '
825 _('rollback of last commit while not checked out '
826 'may lose data'), hint=_('use -f to force'))
826 'may lose data'), hint=_('use -f to force'))
827
827
828 ui.status(msg)
828 ui.status(msg)
829 if dryrun:
829 if dryrun:
830 return 0
830 return 0
831
831
832 parents = self.dirstate.parents()
832 parents = self.dirstate.parents()
833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 if os.path.exists(self.join('undo.bookmarks')):
834 if os.path.exists(self.join('undo.bookmarks')):
835 util.rename(self.join('undo.bookmarks'),
835 util.rename(self.join('undo.bookmarks'),
836 self.join('bookmarks'))
836 self.join('bookmarks'))
837 if os.path.exists(self.sjoin('undo.phaseroots')):
837 if os.path.exists(self.sjoin('undo.phaseroots')):
838 util.rename(self.sjoin('undo.phaseroots'),
838 util.rename(self.sjoin('undo.phaseroots'),
839 self.sjoin('phaseroots'))
839 self.sjoin('phaseroots'))
840 self.invalidate()
840 self.invalidate()
841
841
842 parentgone = (parents[0] not in self.changelog.nodemap or
842 parentgone = (parents[0] not in self.changelog.nodemap or
843 parents[1] not in self.changelog.nodemap)
843 parents[1] not in self.changelog.nodemap)
844 if parentgone:
844 if parentgone:
845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 try:
846 try:
847 branch = self.opener.read('undo.branch')
847 branch = self.opener.read('undo.branch')
848 self.dirstate.setbranch(branch)
848 self.dirstate.setbranch(branch)
849 except IOError:
849 except IOError:
850 ui.warn(_('named branch could not be reset: '
850 ui.warn(_('named branch could not be reset: '
851 'current branch is still \'%s\'\n')
851 'current branch is still \'%s\'\n')
852 % self.dirstate.branch())
852 % self.dirstate.branch())
853
853
854 self.dirstate.invalidate()
854 self.dirstate.invalidate()
855 parents = tuple([p.rev() for p in self.parents()])
855 parents = tuple([p.rev() for p in self.parents()])
856 if len(parents) > 1:
856 if len(parents) > 1:
857 ui.status(_('working directory now based on '
857 ui.status(_('working directory now based on '
858 'revisions %d and %d\n') % parents)
858 'revisions %d and %d\n') % parents)
859 else:
859 else:
860 ui.status(_('working directory now based on '
860 ui.status(_('working directory now based on '
861 'revision %d\n') % parents)
861 'revision %d\n') % parents)
862 self.destroyed()
862 self.destroyed()
863 return 0
863 return 0
864
864
865 def invalidatecaches(self):
865 def invalidatecaches(self):
866 try:
866 try:
867 delattr(self, '_tagscache')
867 delattr(self, '_tagscache')
868 except AttributeError:
868 except AttributeError:
869 pass
869 pass
870
870
871 self._branchcache = None # in UTF-8
871 self._branchcache = None # in UTF-8
872 self._branchcachetip = None
872 self._branchcachetip = None
873
873
874 def invalidatedirstate(self):
874 def invalidatedirstate(self):
875 '''Invalidates the dirstate, causing the next call to dirstate
875 '''Invalidates the dirstate, causing the next call to dirstate
876 to check if it was modified since the last time it was read,
876 to check if it was modified since the last time it was read,
877 rereading it if it has.
877 rereading it if it has.
878
878
879 This is different to dirstate.invalidate() that it doesn't always
879 This is different to dirstate.invalidate() that it doesn't always
880 rereads the dirstate. Use dirstate.invalidate() if you want to
880 rereads the dirstate. Use dirstate.invalidate() if you want to
881 explicitly read the dirstate again (i.e. restoring it to a previous
881 explicitly read the dirstate again (i.e. restoring it to a previous
882 known good state).'''
882 known good state).'''
883 try:
883 try:
884 delattr(self, 'dirstate')
884 delattr(self, 'dirstate')
885 except AttributeError:
885 except AttributeError:
886 pass
886 pass
887
887
888 def invalidate(self):
888 def invalidate(self):
889 for k in self._filecache:
889 for k in self._filecache:
890 # dirstate is invalidated separately in invalidatedirstate()
890 # dirstate is invalidated separately in invalidatedirstate()
891 if k == 'dirstate':
891 if k == 'dirstate':
892 continue
892 continue
893
893
894 try:
894 try:
895 delattr(self, k)
895 delattr(self, k)
896 except AttributeError:
896 except AttributeError:
897 pass
897 pass
898 self.invalidatecaches()
898 self.invalidatecaches()
899
899
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 try:
901 try:
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 except error.LockHeld, inst:
903 except error.LockHeld, inst:
904 if not wait:
904 if not wait:
905 raise
905 raise
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 (desc, inst.locker))
907 (desc, inst.locker))
908 # default to 600 seconds timeout
908 # default to 600 seconds timeout
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 releasefn, desc=desc)
910 releasefn, desc=desc)
911 if acquirefn:
911 if acquirefn:
912 acquirefn()
912 acquirefn()
913 return l
913 return l
914
914
915 def _afterlock(self, callback):
915 def _afterlock(self, callback):
916 """add a callback to the current repository lock.
916 """add a callback to the current repository lock.
917
917
918 The callback will be executed on lock release."""
918 The callback will be executed on lock release."""
919 l = self._lockref and self._lockref()
919 l = self._lockref and self._lockref()
920 if l:
920 if l:
921 l.postrelease.append(callback)
921 l.postrelease.append(callback)
922
922
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

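    # Illustrative sketch: store mutations are wrapped in lock()/release(),
    # usually together with a transaction (names below are made up):
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('example')
    #         try:
    #             ...  # write revlog data
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()
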
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

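    # Illustrative sketch: when both locks are needed (commit() below is
    # one such caller), the convention is wlock first, then lock, released
    # in reverse order:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             ...  # touch dirstate and store together
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
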
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

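    # Illustrative sketch: for a commit that renames foo to bar, the new
    # filelog revision of bar carries copy metadata instead of a real
    # first parent, roughly (the copyrev value is made up):
    #
    #     meta = {'copy': 'foo', 'copyrev': '3d11a0a3...'}
    #     fparent1, fparent2 = nullid, newfparent
    #
    # readers that see a null first parent then consult meta['copy'] and
    # meta['copyrev'] to locate the source revision, which is what the
    # "look up the copy data" comment above refers to.
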
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

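    # Illustrative sketch: committing programmatically from an extension
    # (message and user are made up):
    #
    #     node = repo.commit(text='example: tweak docs',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
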
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit to the proper phase
            targetphase = self.ui.configint('phases', 'new-commit', 1)
            if targetphase:
                # retractboundary doesn't alter the phase of parent
                # changesets; if a parent already has a higher phase, the
                # resulting phase will be compliant anyway.
                #
                # if the minimal phase is 0 we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
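            # Illustrative sketch (assumption: phase numbers beyond the
            # 0 = public / 1 = draft pair used in this series map to more
            # private phases): with
            #
            #     [phases]
            #     new-commit = 2
            #
            # in hgrc, every new commit starts out in phase 2, while
            # new-commit = 0 skips the retractboundary() call above and
            # leaves new commits public.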
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

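    # Illustrative sketch: status() returns a 7-tuple of sorted lists, so
    # callers unpack it positionally, e.g.:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True)
    #     for f in modified:
    #         repo.ui.write('M %s\n' % f)
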
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

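    # Illustrative sketch: between() samples the chain from top towards
    # bottom at exponentially growing distances (i == 1, 2, 4, 8, ...),
    # mirroring this loop over a hypothetical ancestor list:
    #
    #     i, f, picks = 0, 1, []
    #     for node in chain:  # ancestors of top, nearest first
    #         if i == f:
    #             picks.append(node)
    #             f *= 2
    #         i += 1
    #
    # giving the old wire protocol a compact skip list for narrowing down
    # common ancestors without walking every changeset.
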
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                subset = common + added
                rheads, rroots = phases.analyzeremotephases(self, subset,
                                                            remotephases)
                for phase, boundary in enumerate(rheads):
                    phases.advanceboundary(self, phase, boundary)
            else:
                # remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, 0, common + added)
        finally:
            lock.release()

        return result

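    # Illustrative sketch: the 'phases' listkeys namespace consulted above
    # is a plain string-to-string map; a publishing server (the default)
    # is assumed to answer roughly:
    #
    #     {'publishing': 'True'}
    #
    # while a non-publishing one answers with its draft root nodes, e.g.
    # {'<hex-node>': '1', ...}, which analyzeremotephases() turns into
    # per-phase heads and roots.
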
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                          revs, newbranch)
                ret = remote_heads
                # create a callback for addchangegroup.
                # It will also be used in the branch of the conditional below.
                if cg is not None:
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remote_heads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remote_heads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, 0, fut)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, fut, remotephases)
                    rheads, rroots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, 0, fut)
                    else: # publish = False
                        for phase, rpheads in enumerate(rheads):
                            phases.advanceboundary(self, phase, rpheads)
                    ### Apply local phase on remote
                    #
                    # XXX If the push failed we should use strict common and
                    # not future to avoid pushing phase data on unknown
                    # changesets. This is to be done later.
                    futctx = [self[n] for n in fut if n != nullid]
                    for phase in phases.trackedphases[::-1]:
                        prevphase = phase - 1
                        # get all candidates for head in the previous phase
                        inprev = [ctx for ctx in futctx
                                  if ctx.phase() == prevphase]
                        for newremotehead in self.set('heads(%ld & (%ln::))',
                                                      inprev, rroots[phase]):
                            r = remote.pushkey('phases',
                                               newremotehead.hex(),
                                               str(phase), str(prevphase))
                            if not r:
                                self.ui.warn(_('updating phase of %s '
                                               'to %s failed!\n')
                                             % (newremotehead, prevphase))
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

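    # Illustrative sketch: each phase move above is a single pushkey call;
    # turning one remote head from draft (1) to public (0) looks like:
    #
    #     ok = remote.pushkey('phases', ctx.hex(), '1', '0')
    #
    # where the old value acts as a guard -- the server refuses the move
    # if its recorded phase for that node no longer matches '1'.
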
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

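    # Illustrative sketch: getbundle() ships ancestors(heads) minus
    # ancestors(common); on a linear chain p <- x <- y <- h, calling
    # repo.getbundle('pull', heads=[h], common=[p]) would bundle exactly
    # x, y and h (hypothetical nodes).
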
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1813 def changegroup(self, basenodes, source):
1820 def changegroup(self, basenodes, source):
1814 # to avoid a race we use changegroupsubset() (issue1320)
1821 # to avoid a race we use changegroupsubset() (issue1320)
1815 return self.changegroupsubset(basenodes, self.heads(), source)
1822 return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]
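
        # Note added for clarity (not in the original source): a manifest or
        # filelog revision belongs in the bundle exactly when its linkrev
        # points at one of the changesets being sent, so gennodelst prunes
        # each revlog down to the outgoing set.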

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1 + added heads (2..n)
        - fewer heads than before: -1 - removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr
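
            # Note added for clarity (an assumption about the changegroup
            # source's contract, not in the original source): source.callback
            # is invoked once per chunk as data is consumed, so prog.__call__
            # above simply advances the 'changesets' progress bar.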

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if publishing and srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # This clause ensures pushed changesets are always marked
                # as public.
                phases.advanceboundary(self, 0, added)
            elif srctype != 'strip': # strip should not touch boundary at all
                phases.retractboundary(self, 1, added)
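
            # Note added for clarity (not in the original source): the bare
            # integers are phase numbers (0 public, 1 draft in this version
            # of the phases module): a push received by a publishing
            # repository makes everything public, while other non-strip
            # sources leave the incoming changesets in the draft phase.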

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
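            # Wire format recap (added; inferred from the parsing code, not
            # part of the original source):
            #   <status>\n                   -- 0 ok, 1 forbidden, 2 lock failed
            #   <total_files> <total_bytes>\n
            #   per file: <name>\0<size>\n, followed by <size> raw bytes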
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
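
        # Hypothetical usage (added; not in the original source): with
        # `other = hg.repository(ui, 'http://example.com/repo')` and a
        # freshly created local repository, `local.clone(other, stream=True)`
        # tries an uncompressed streaming clone first and falls back to
        # self.pull() when the remote's formats are not supported locally.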

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
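
# Illustration (added; not in the original source): aftertrans snapshots the
# (src, dest) pairs as plain tuples so the returned callback keeps no
# reference to the transaction, e.g.
#
#   after = aftertrans([('journal', 'undo')])
#   after()    # renames 'journal' to 'undo' via util.rename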

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
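
# For example (added; not in the original source):
#   undoname('.hg/journal.dirstate') -> '.hg/undo.dirstate'
# Only the first 'journal' in the basename is replaced, and the assert
# rejects names that do not start with 'journal'.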

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True