localrepo: fix comment on set
Matt Mackall
r14904:ff2d907a default
@@ -1,2002 +1,2002 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

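    # Illustrative usage (an editor's sketch, not part of the original
    # file): callers normally obtain a localrepository via hg.repository()
    # rather than constructing this class directly. Assuming an existing
    # repository at a hypothetical path:
    #
    #   from mercurial import ui, hg
    #   repo = hg.repository(ui.ui(), '/path/to/repo')
    #   print repo.root    # absolute path of the working directory root
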
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

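    # Editor's sketch of the resulting file (an assumption drawn from the
    # loop above): .hg/requires holds one requirement name per line, e.g.
    #
    #   revlogv1
    #   store
    #   fncache
    #   dotencode
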
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

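    # Illustrative usage of the container protocol above (an editor's
    # sketch, not part of the original file; assumes an existing repo):
    #
    #   len(repo)                  # revision count, via __len__
    #   repo[0]                    # changectx for revision 0, via __getitem__
    #   repo[None]                 # workingctx for the working directory
    #   'tip' in repo              # membership test, via __contains__
    #   [repo[r] for r in repo]    # revision numbers, via __iter__
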
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
-       replacement via formatrevspec
+       replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]

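    # Illustrative usage of set() (an editor's sketch, not part of the
    # original file): revset.formatspec() quotes each argument into the
    # expression, so callers need not escape values themselves. E.g.:
    #
    #   for ctx in repo.set('heads(branch(%s))', 'default'):
    #       print ctx.rev(), short(ctx.node())
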
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

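    # Illustrative usage of tag() (an editor's sketch, not part of the
    # original file; the tag name is hypothetical):
    #
    #   node = repo.lookup('tip')
    #   repo.tag('v1.0', node, 'Added tag v1.0', False, 'alice', None)
    #
    # With local=False this rewrites .hgtags and commits a new changeset;
    # with local=True it only appends to .hg/localtags.
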
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

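    # Illustrative usage of the tag queries above (an editor's sketch,
    # not part of the original file; 'v1.0' is hypothetical):
    #
    #   repo.tags()           # {'tip': node, 'v1.0': node, ...}
    #   repo.tagtype('v1.0')  # 'global', 'local', or None
    #   repo.tagslist()       # [(name, node), ...] ordered by revision
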
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

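    # Editor's note (not part of the original file): branchmap() returns
    # every head of every branch, while branchtags() reduces each branch
    # to a single node, preferring the tipmost open head. Sketch:
    #
    #   repo.branchmap()      # {'default': [head1, head2], ...}
    #   repo.branchtags()     # {'default': head2, ...}
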
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

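    # Illustrative usage of lookup() (an editor's sketch, not part of the
    # original file). Resolution order, per the code above: integer revs,
    # the '.', 'null' and 'tip' keywords, exact nodes, bookmarks, tags,
    # branch names, then unambiguous node prefixes:
    #
    #   repo.lookup(0)           # binary node of revision 0
    #   repo.lookup('tip')       # binary node of the tip changeset
    #   repo.lookup('ff2d907a')  # an unambiguous hex prefix
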
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

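    # Editor's sketch (an assumption, not part of the original file):
    # the patterns loaded above come from the [encode] and [decode]
    # sections of hgrc; a hypothetical entry piping text files through
    # an external command might read
    #
    #   [encode]
    #   **.txt = tr '\r' '\n'
    #
    # _filter() then pipes matching files through the command via
    # util.filter() whenever wread()/wwrite() touch them.
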
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

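    # Illustrative usage of transaction() (an editor's sketch, not part
    # of the original file): callers take the store lock, close the
    # transaction on success, and release it in a finally block so a
    # failure rolls the journal back:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write to the store ...
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()
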
    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

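    # Editor's note (not part of the original file): when both locks are
    # needed, take wlock() before lock(), as rollback() above does, and
    # release them with release() from the lock module, which tolerates
    # None entries:
    #
    #   wlock = lock_ = None
    #   try:
    #       wlock = repo.wlock()
    #       lock_ = repo.lock()
    #       # ... modify the store and the working directory ...
    #   finally:
    #       release(lock_, wlock)
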
843 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
843 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
844 """
844 """
845 commit an individual file as part of a larger transaction
845 commit an individual file as part of a larger transaction
846 """
846 """
847
847
848 fname = fctx.path()
848 fname = fctx.path()
849 text = fctx.data()
849 text = fctx.data()
850 flog = self.file(fname)
850 flog = self.file(fname)
851 fparent1 = manifest1.get(fname, nullid)
851 fparent1 = manifest1.get(fname, nullid)
852 fparent2 = fparent2o = manifest2.get(fname, nullid)
852 fparent2 = fparent2o = manifest2.get(fname, nullid)
853
853
854 meta = {}
854 meta = {}
855 copy = fctx.renamed()
855 copy = fctx.renamed()
856 if copy and copy[0] != fname:
856 if copy and copy[0] != fname:
857 # Mark the new revision of this file as a copy of another
857 # Mark the new revision of this file as a copy of another
858 # file. This copy data will effectively act as a parent
858 # file. This copy data will effectively act as a parent
859 # of this new revision. If this is a merge, the first
859 # of this new revision. If this is a merge, the first
860 # parent will be the nullid (meaning "look up the copy data")
860 # parent will be the nullid (meaning "look up the copy data")
861 # and the second one will be the other parent. For example:
861 # and the second one will be the other parent. For example:
862 #
862 #
863 # 0 --- 1 --- 3 rev1 changes file foo
863 # 0 --- 1 --- 3 rev1 changes file foo
864 # \ / rev2 renames foo to bar and changes it
864 # \ / rev2 renames foo to bar and changes it
            #    \- 2 -/     rev3 should have bar with all changes and
            #                     should record that bar descends from
            #                     bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

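    # Added commentary (not part of the original source): when the rename
    # branch above fires, the new filelog revision carries metadata roughly
    # like
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # and fparent1 is set to nullid, so the copy source, rather than the
    # straight file parent, acts as the effective ancestor in later merges.
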
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
                elif '.hgsub' in changes[2]:
                    # clean up .hgsubstate when .hgsub is removed
                    if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                        changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

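    # Usage sketch (illustrative only): code holding a repo object could
    # drive a commit like
    #
    #     node = repo.commit(text="fix encoding bug",
    #                        user="alice <alice@example.com>")
    #     if node is None:
    #         ui.status("nothing changed\n")
    #
    # commit() returns None when there is nothing to commit (see the early
    # return above) and the new changeset's node otherwise.
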
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

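    # Added commentary: commitctx writes filelogs first, then the manifest,
    # then the changelog entry, all with linkrev = len(self), i.e. the
    # revision number the new changeset is about to occupy, so every file
    # and manifest revision can be traced back to the changeset that
    # introduced it.
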
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

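    # Usage sketch (illustrative; the match constructor shown is an
    # assumption about matchmod's API, not taken from this file):
    #
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):          # walk the working directory
    #         ui.write(f + '\n')
    #
    # Passing node=rev walks a committed changeset instead.
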
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

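    # Illustrative note: the return value unpacks as
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, clean=True)
    #
    # with each element a sorted list of file names; deleted, unknown and
    # ignored are only populated when the working directory is compared.
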
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

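    # Usage sketch (illustrative): open heads of the 'default' branch,
    # newest first:
    #
    #     for h in repo.branchheads('default'):
    #         ui.write("%s\n" % hex(h))
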
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

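    # Added commentary: for each (top, bottom) pair, the loop above walks
    # the first-parent chain from top towards bottom, keeping nodes at
    # exponentially growing distances (1, 2, 4, 8, ...) because i is matched
    # against f, which doubles on every hit. The legacy discovery protocol
    # uses these sparse samples to bisect towards the common ancestor.
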
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

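    # Usage sketch (illustrative; hg.repository is defined in mercurial.hg,
    # which this module does not import):
    #
    #     other = hg.repository(ui, 'http://example.com/repo')
    #     repo.pull(other)           # fetch everything we do not have yet
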
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

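    # Override sketch (illustrative; treat the exact calls as assumptions):
    # an extension can hook this with the usual wrapfunction idiom, e.g.
    #
    #     def checkpush(orig, repo, force, revs):
    #         # ... reject or validate the outgoing revs here ...
    #         return orig(repo, force, revs)
    #     extensions.wrapfunction(localrepo.localrepository, 'checkpush',
    #                             checkpush)
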
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

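    # Usage sketch (illustrative): pushing everything and allowing a new
    # remote branch:
    #
    #     ret = repo.push(other, newbranch=True)
    #
    # where ret follows the integer convention documented above.
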
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

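    # Illustrative note: this is roughly the engine behind
    # `hg bundle --base B --rev H`; in-process that corresponds to something
    # like
    #
    #     cg = repo.changegroupsubset([repo.lookup('B')],
    #                                 [repo.lookup('H')], 'bundle')
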
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

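    # Illustrative note: conceptually the result covers
    # ancestors(heads) - ancestors(common); e.g.
    #
    #     cg = repo.getbundle('pull', heads=[h], common=[c])
    #
    # returns None when the requested heads add nothing beyond common.
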
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

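    # Added commentary: the lookup callback above maps each node the bundler
    # emits back to the changelog node that introduced it; the receiver uses
    # that link node to reconstruct linkrevs. mfs and fnodes are the
    # bookkeeping tables that make this reverse mapping possible on the
    # slow path.
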
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

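    # Added commentary: this fast path applies when the requested heads are
    # exactly the repository heads, so testing linkrev membership in revset
    # is a sufficient filter and no per-file node table (fnodes) is needed,
    # unlike the slow path in _changegroupsubset.
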
1699 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1699 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1700 """Add the changegroup returned by source.read() to this repo.
1700 """Add the changegroup returned by source.read() to this repo.
1701 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1701 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1702 the URL of the repo where this changegroup is coming from.
1702 the URL of the repo where this changegroup is coming from.
1703 If lock is not None, the function takes ownership of the lock
1703 If lock is not None, the function takes ownership of the lock
1704 and releases it after the changegroup is added.
1704 and releases it after the changegroup is added.
1705
1705
1706 Return an integer summarizing the change to this repo:
1706 Return an integer summarizing the change to this repo:
1707 - nothing changed or no source: 0
1707 - nothing changed or no source: 0
1708 - more heads than before: 1+added heads (2..n)
1708 - more heads than before: 1+added heads (2..n)
1709 - fewer heads than before: -1-removed heads (-2..-n)
1709 - fewer heads than before: -1-removed heads (-2..-n)
1710 - number of heads stays the same: 1
1710 - number of heads stays the same: 1
1711 """
1711 """
1712 def csmap(x):
1712 def csmap(x):
1713 self.ui.debug("add changeset %s\n" % short(x))
1713 self.ui.debug("add changeset %s\n" % short(x))
1714 return len(cl)
1714 return len(cl)
1715
1715
1716 def revmap(x):
1716 def revmap(x):
1717 return cl.rev(x)
1717 return cl.rev(x)
1718
1718
1719 if not source:
1719 if not source:
1720 return 0
1720 return 0
1721
1721
1722 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1722 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1723
1723
1724 changesets = files = revisions = 0
1724 changesets = files = revisions = 0
1725 efiles = set()
1725 efiles = set()
1726
1726
1727 # write changelog data to temp files so concurrent readers will not see
1727 # write changelog data to temp files so concurrent readers will not see
1728 # inconsistent view
1728 # inconsistent view
1729 cl = self.changelog
1729 cl = self.changelog
1730 cl.delayupdate()
1730 cl.delayupdate()
1731 oldheads = cl.heads()
1731 oldheads = cl.heads()
1732
1732
1733 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1733 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1734 try:
1734 try:
1735 trp = weakref.proxy(tr)
1735 trp = weakref.proxy(tr)
1736 # pull off the changeset group
1736 # pull off the changeset group
1737 self.ui.status(_("adding changesets\n"))
1737 self.ui.status(_("adding changesets\n"))
1738 clstart = len(cl)
1738 clstart = len(cl)
1739 class prog(object):
1739 class prog(object):
1740 step = _('changesets')
1740 step = _('changesets')
1741 count = 1
1741 count = 1
1742 ui = self.ui
1742 ui = self.ui
1743 total = None
1743 total = None
1744 def __call__(self):
1744 def __call__(self):
1745 self.ui.progress(self.step, self.count, unit=_('chunks'),
1745 self.ui.progress(self.step, self.count, unit=_('chunks'),
1746 total=self.total)
1746 total=self.total)
1747 self.count += 1
1747 self.count += 1
1748 pr = prog()
1748 pr = prog()
1749 source.callback = pr
1749 source.callback = pr
1750
1750
1751 source.changelogheader()
1751 source.changelogheader()
1752 if (cl.addgroup(source, csmap, trp) is None
1752 if (cl.addgroup(source, csmap, trp) is None
1753 and not emptyok):
1753 and not emptyok):
1754 raise util.Abort(_("received changelog group is empty"))
1754 raise util.Abort(_("received changelog group is empty"))
1755 clend = len(cl)
1755 clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
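
    # A hedged sketch, not part of the original class (the helper name is
    # hypothetical): the non-zero return value above encodes the head-count
    # delta dh, shifted away from zero so callers can reserve 0 for "no
    # changesets added". This helper undoes that shift.
    def _headsdelta(self, ret):
        '''recover dh from a non-zero addchangegroup() return value'''
        if ret > 0:
            return ret - 1
        return ret + 1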

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
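
    # A hedged sketch, not part of the original class (the helper name is
    # hypothetical): the stream_out wire format read above is a status-code
    # line, a "<files> <bytes>" line, then one "<name>\0<size>" header per
    # file followed by exactly <size> bytes of raw store data.
    def _parsestreamfilehdr(self, l):
        '''split one "<name>\\0<size>" header line (assumed well-formed)'''
        name, size = l.split('\0', 1)
        return store.decodedir(name), int(size)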

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
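
    # Illustrative usage, with hypothetical stand-ins for repo and remote:
    #
    #   repo.clone(remote, stream=True)      # streams if formats allow
    #   repo.clone(remote, heads=[somenode]) # heads always force a pull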

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
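
    # Illustrative sketch (the key and node values are hypothetical): the
    # bookmarks namespace gives compare-and-swap semantics, so a push only
    # succeeds if the old value still matches:
    #
    #   ok = repo.pushkey('bookmarks', 'stable', oldhexnode, newhexnode)
    #   if not ok:
    #       pass # bookmark moved underneath us; retry or report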

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
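
    # Illustrative sketch of listing the same namespace back out:
    #
    #   marks = repo.listkeys('bookmarks') # dict mapping name -> hex node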

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
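
    # Illustrative usage sketch: a caller can stash the message before a
    # risky commit and point the user at .hg/last-message.txt on failure:
    #
    #   msgfn = repo.savecommitmessage(text)
    #   # ... commit fails ...
    #   raise util.Abort(_("commit failed; message saved to %s") % msgfn)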

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
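
# Illustrative sketch (the file pair is hypothetical): transaction setup
# passes the closure returned by aftertrans() as an after-close callback,
# so journal files become undo files without the closure holding a repo
# reference:
#
#   after = aftertrans([('journal', 'undo')])
#   after() # performs the rename via util.rename once the transaction closes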

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
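
# Illustrative examples of the mapping (paths are hypothetical); only the
# leading 'journal' component of the basename is replaced:
#
#   undoname('.hg/store/journal')    -> '.hg/store/undo'
#   undoname('.hg/journal.dirstate') -> '.hg/undo.dirstate'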

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True