localrepo: decorate manifest() with filecache
Idan Kamara
r14934:019fe0b0 default
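The one-line change below (source line 187) swaps the decorator on manifest: @propertycache computes manifest.manifest(self.sopener) once and keeps it until invalidate() deletes the attribute, whereas filecache additionally ties the cached object to the on-disk state of 00manifest.i, the same treatment changelog already gets for 00changelog.i a few lines earlier. Roughly, the mechanism looks like the sketch below. This is a hypothetical, simplified stand-in for the real scmutil.filecache (which records util.filecacheentry objects in repo._filecache so the unlock handlers in lock()/wlock() can refresh them), shown only to illustrate the caching rule; the instore flag mirrors the second argument in @filecache('00manifest.i', True), assumed to select the store opener.

import os

class filecachesketch(object):
    '''Simplified, assumed stand-in for scmutil.filecache: cache the
    result of a repo property and recompute it only when the backing
    file's (mtime, size) stamp changes.'''

    def __init__(self, path, instore=False):
        self.path = path        # file to watch, relative to .hg/
        self.instore = instore  # True: resolve under .hg/store instead

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        join = self.instore and obj.sjoin or obj.join
        try:
            st = os.stat(join(self.path))
            stamp = (st.st_mtime, st.st_size)
        except OSError:
            stamp = None        # file is missing (or unreadable)
        cached = obj._filecache.get(self.name)
        if cached is None or cached[0] != stamp:
            # first access, or the file changed underneath us: recompute
            cached = (stamp, self.func(obj))
            obj._filecache[self.name] = cached
        return cached[1]

With this in place, a manifest rewritten on disk (for example by another process) should be picked up on the next attribute access rather than only after an explicit invalidate().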
@@ -1,2037 +1,2037 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
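        # (illustrative note, as we understand the design: entries are
        # added lazily -- the filecache descriptor records one per
        # decorated property, e.g. _bookmarks or changelog below, the
        # first time that property is computed)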

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

-    @propertycache
+    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)
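    # note on the change above: the manifest used to be cached with
    # @propertycache for the life of the repo object; with filecache it
    # behaves like changelog and is re-read once 00manifest.i changes on
    # disk (the cache entry lives in self._filecache, refreshed by the
    # unlock handlers in lock()/wlock() below)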

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]
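        # usage sketch (hypothetical caller; '%d' is formatspec's int
        # substitution):
        #   for ctx in repo.set('%d::%d', startrev, endrev):
        #       ...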

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        try:
            delattr(self, 'dirstate')
        except AttributeError:
            pass

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()
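    # (note: invalidate() drops the filecache'd properties -- changelog,
    # manifest, _bookmarks, _bookmarkcurrent -- from __dict__ so they are
    # re-read from disk on next access; the dirstate is handled separately
    # by invalidatedirstate() above)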

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
856
856
857 def wlock(self, wait=True):
857 def wlock(self, wait=True):
858 '''Lock the non-store parts of the repository (everything under
858 '''Lock the non-store parts of the repository (everything under
859 .hg except .hg/store) and return a weak reference to the lock.
859 .hg except .hg/store) and return a weak reference to the lock.
860 Use this before modifying files in .hg.'''
860 Use this before modifying files in .hg.'''
861 l = self._wlockref and self._wlockref()
861 l = self._wlockref and self._wlockref()
862 if l is not None and l.held:
862 if l is not None and l.held:
863 l.lock()
863 l.lock()
864 return l
864 return l
865
865
866 def unlock():
866 def unlock():
867 self.dirstate.write()
867 self.dirstate.write()
868 ce = self._filecache.get('dirstate')
868 ce = self._filecache.get('dirstate')
869 if ce:
869 if ce:
870 ce.refresh()
870 ce.refresh()
871
871
872 l = self._lock(self.join("wlock"), wait, unlock,
872 l = self._lock(self.join("wlock"), wait, unlock,
873 self.invalidatedirstate, _('working directory of %s') %
873 self.invalidatedirstate, _('working directory of %s') %
874 self.origroot)
874 self.origroot)
875 self._wlockref = weakref.ref(l)
875 self._wlockref = weakref.ref(l)
876 return l
876 return l
877
877
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

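    # For a rename, _filecommit records the copy source in filelog metadata
    # rather than in the parents; a hypothetical result (values illustrative):
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-char hex node of foo@rev1>'}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
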
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # deliberately broad: point at the saved message, re-raise
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

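    # Sketch of a typical caller of commit() (assumes `repo` exists; the
    # matcher narrows the commit to one file):
    #
    #   m = matchmod.match(repo.root, '', ['path/to/file'])
    #   node = repo.commit(text="fix typo", user="alice", match=m)
    #   if node is None:
    #       pass # nothing changed; no revision was created
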
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

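    # commit() above funnels into commitctx() with a workingctx; tools that
    # build revisions without touching the working directory (e.g. convert)
    # can pass an in-memory context.memctx here instead.
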
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

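    # Example use of walk() (a sketch; assumes `repo`):
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):     # node=None walks the working directory
    #       print f
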
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

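    # The seven lists come back in a fixed order; a sketch of unpacking them
    # (assumes `repo`; unknown/ignored/clean are only populated when the
    # corresponding flag is passed):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
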
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

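    # For instance, the open heads of the 'default' branch, newest first
    # (sketch; assumes `repo`):
    #
    #   bheads = repo.branchheads('default', closed=False)
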
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

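    # between() walks the first-parent chain from each top towards bottom,
    # sampling nodes at exponentially growing gaps (i == 1, 2, 4, 8, ...);
    # the old discovery protocol uses these samples to bisect for a common
    # ancestor in a logarithmic number of round trips.
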
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

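    # Pulling from a peer (sketch; assumes `other` was opened elsewhere,
    # e.g. with hg.repository(ui, url)):
    #
    #   result = repo.pull(other)                    # all remote heads
    #   result = repo.pull(other, heads=[somenode])  # partial pull
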
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

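    # Sketch of the return-value contract (assumes `other` is a peer):
    #
    #   ret = repo.push(other)
    #   # ret == 0 -> HTTP error or nothing to push
    #   # ret == 1 -> pushed with remote head count unchanged, or refused
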
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

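    # The reorder knob read above is ordinary user configuration; e.g. in
    # an hgrc:
    #
    #   [bundle]
    #   reorder = auto    # or true/false; auto defers to the revlog default
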
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1734 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1734 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1735 """Add the changegroup returned by source.read() to this repo.
1735 """Add the changegroup returned by source.read() to this repo.
1736 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1736 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1737 the URL of the repo where this changegroup is coming from.
1737 the URL of the repo where this changegroup is coming from.
1738 If lock is not None, the function takes ownership of the lock
1738 If lock is not None, the function takes ownership of the lock
1739 and releases it after the changegroup is added.
1739 and releases it after the changegroup is added.
1740
1740
1741 Return an integer summarizing the change to this repo:
1741 Return an integer summarizing the change to this repo:
1742 - nothing changed or no source: 0
1742 - nothing changed or no source: 0
1743 - more heads than before: 1+added heads (2..n)
1743 - more heads than before: 1+added heads (2..n)
1744 - fewer heads than before: -1-removed heads (-2..-n)
1744 - fewer heads than before: -1-removed heads (-2..-n)
1745 - number of heads stays the same: 1
1745 - number of heads stays the same: 1
1746 """
1746 """
1747 def csmap(x):
1747 def csmap(x):
1748 self.ui.debug("add changeset %s\n" % short(x))
1748 self.ui.debug("add changeset %s\n" % short(x))
1749 return len(cl)
1749 return len(cl)
1750
1750
1751 def revmap(x):
1751 def revmap(x):
1752 return cl.rev(x)
1752 return cl.rev(x)
1753
1753
1754 if not source:
1754 if not source:
1755 return 0
1755 return 0
1756
1756
1757 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1757 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1758
1758
1759 changesets = files = revisions = 0
1759 changesets = files = revisions = 0
1760 efiles = set()
1760 efiles = set()
1761
1761
1762 # write changelog data to temp files so concurrent readers will not see
1762 # write changelog data to temp files so concurrent readers will not see
1763 # inconsistent view
1763 # inconsistent view
1764 cl = self.changelog
1764 cl = self.changelog
1765 cl.delayupdate()
1765 cl.delayupdate()
1766 oldheads = cl.heads()
1766 oldheads = cl.heads()
1767
1767
1768 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1768 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1769 try:
1769 try:
1770 trp = weakref.proxy(tr)
1770 trp = weakref.proxy(tr)
1771 # pull off the changeset group
1771 # pull off the changeset group
1772 self.ui.status(_("adding changesets\n"))
1772 self.ui.status(_("adding changesets\n"))
1773 clstart = len(cl)
1773 clstart = len(cl)
1774 class prog(object):
1774 class prog(object):
1775 step = _('changesets')
1775 step = _('changesets')
1776 count = 1
1776 count = 1
1777 ui = self.ui
1777 ui = self.ui
1778 total = None
1778 total = None
1779 def __call__(self):
1779 def __call__(self):
1780 self.ui.progress(self.step, self.count, unit=_('chunks'),
1780 self.ui.progress(self.step, self.count, unit=_('chunks'),
1781 total=self.total)
1781 total=self.total)
1782 self.count += 1
1782 self.count += 1
1783 pr = prog()
1783 pr = prog()
1784 source.callback = pr
1784 source.callback = pr
1785
1785
1786 source.changelogheader()
1786 source.changelogheader()
1787 if (cl.addgroup(source, csmap, trp) is None
1787 if (cl.addgroup(source, csmap, trp) is None
1788 and not emptyok):
1788 and not emptyok):
1789 raise util.Abort(_("received changelog group is empty"))
1789 raise util.Abort(_("received changelog group is empty"))
1790 clend = len(cl)
1790 clend = len(cl)
1791 changesets = clend - clstart
1791 changesets = clend - clstart
1792 for c in xrange(clstart, clend):
1792 for c in xrange(clstart, clend):
1793 efiles.update(self[c].files())
1793 efiles.update(self[c].files())
1794 efiles = len(efiles)
1794 efiles = len(efiles)
1795 self.ui.progress(_('changesets'), None)
1795 self.ui.progress(_('changesets'), None)
1796
1796
1797 # pull off the manifest group
1797 # pull off the manifest group
1798 self.ui.status(_("adding manifests\n"))
1798 self.ui.status(_("adding manifests\n"))
1799 pr.step = _('manifests')
1799 pr.step = _('manifests')
1800 pr.count = 1
1800 pr.count = 1
1801 pr.total = changesets # manifests <= changesets
1801 pr.total = changesets # manifests <= changesets
1802 # no need to check for empty manifest group here:
1802 # no need to check for empty manifest group here:
1803 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1803 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1804 # no new manifest will be created and the manifest group will
1804 # no new manifest will be created and the manifest group will
1805 # be empty during the pull
1805 # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
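            # needfiles maps filename -> set of file nodes we expect to
            # receive; it is populated only when server-side validation is
            # enabled below, and entries are checked off as filelogs arrive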
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

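            # anything still listed in needfiles was promised by a manifest
            # but never delivered by a filelog group; verify it is already
            # present locally, and abort otherwise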
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

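            # dh is the net change in head count; new heads whose changeset
            # carries the 'close' extra (a branch-closing commit) are not
            # counted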
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
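        # 0 would look to callers like "no changesets were added", so the
        # head delta is shifted away from it: a positive result means
        # dh - 1 new heads (1 == no new heads), a negative result means
        # heads were removed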
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
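        '''stream a raw copy of the remote store into the local one.

        The wire format parsed below is: a status line (0 = ok, 1 =
        operation forbidden, 2 = remote locking failed), a line giving
        "<file count> <total bytes>", then for each file a header line
        (store path, a NUL byte, size in bytes) followed by exactly that
        many bytes of raw revlog data.'''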
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

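        # three-way decision: the legacy 'stream' capability (plain
        # revlogv1), the newer 'streamreqs' capability (stream only if we
        # support every advertised format), or fall back to a normal pull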
        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

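    # pushkey and listkeys expose the generic key/value wire protocol,
    # wrapped in pre/post hooks; bookmarks, for example, are exchanged
    # through these namespaces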
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
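        # e.g. repo.debugwireargs('un', 'deux', three='trois') returns
        # 'un deux trois None None' (hypothetical values, for illustration)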
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
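        # write the message to .hg/last-message.txt and hand back a
        # printable path, so callers can tell the user where the text of
        # an interrupted commit was saved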
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
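# the closure returned here is handed to the transaction as its post-close
# callback; once the transaction commits, it renames the journal files
# (src) to their undo names (dest)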
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
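    # maps e.g. 'journal.dirstate' to 'undo.dirstate'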
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True