localrepo: add a cache with stat info for files under .hg/
Idan Kamara
r14929:4bf9493e default
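For context before the diff: the new _filecache dict maps a property name to a stat-based cache entry, so values computed from files under .hg/ (bookmarks, dirstate, and so on) can be reloaded only when the backing file actually changes on disk. A minimal sketch of the idea follows; it is illustrative only (the names filecacheentry._stat, filecache, and repolike below are hypothetical, and Mercurial's real implementation compares more stat fields and handles more edge cases):

import os

class filecacheentry(object):
    """Capture a file's stat info so later changes can be detected."""
    def __init__(self, path):
        self.path = path
        self.stat = self._stat(path)

    @staticmethod
    def _stat(path):
        # assumption: mtime/size/inode are enough to detect a change
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size, st.st_ino)
        except OSError:
            return None  # file does not exist (yet)

    def changed(self):
        return self._stat(self.path) != self.stat

class filecache(object):
    """Descriptor that caches a computed value and recomputes it only
    when the stat info of the backing file under .hg/ changes."""
    def __init__(self, path):
        self.path = path  # path relative to the .hg/ directory

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        entry = obj._filecache.get(self.name)
        if entry is None or entry[0].changed():
            # stat the file, then (re)compute the value
            path = os.path.join(obj.path, self.path)
            entry = (filecacheentry(path), self.func(obj))
            obj._filecache[self.name] = entry
        return entry[1]

Used on a repository-like object it might look like this (again hypothetical):

class repolike(object):
    def __init__(self, path):
        self.path = path      # directory standing in for .hg/
        self._filecache = {}  # property name -> (filecacheentry, value)

    @filecache('bookmarks')
    def bookmarks(self):
        with open(os.path.join(self.path, 'bookmarks')) as fp:
            return fp.read()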
@@ -1,2003 +1,2009 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

+        # A cache for various files under .hg/ that tracks file changes,
+        # (used by the filecache decorator)
+        #
+        # Maps a property name to its util.filecacheentry
+        self._filecache = {}
+
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''

        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        for r in m(self, range(len(self))):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

737 def rollback(self, dryrun=False):
743 def rollback(self, dryrun=False):
738 wlock = lock = None
744 wlock = lock = None
739 try:
745 try:
740 wlock = self.wlock()
746 wlock = self.wlock()
741 lock = self.lock()
747 lock = self.lock()
742 if os.path.exists(self.sjoin("undo")):
748 if os.path.exists(self.sjoin("undo")):
743 try:
749 try:
744 args = self.opener.read("undo.desc").splitlines()
750 args = self.opener.read("undo.desc").splitlines()
745 if len(args) >= 3 and self.ui.verbose:
751 if len(args) >= 3 and self.ui.verbose:
746 desc = _("repository tip rolled back to revision %s"
752 desc = _("repository tip rolled back to revision %s"
747 " (undo %s: %s)\n") % (
753 " (undo %s: %s)\n") % (
748 int(args[0]) - 1, args[1], args[2])
754 int(args[0]) - 1, args[1], args[2])
749 elif len(args) >= 2:
755 elif len(args) >= 2:
750 desc = _("repository tip rolled back to revision %s"
756 desc = _("repository tip rolled back to revision %s"
751 " (undo %s)\n") % (
757 " (undo %s)\n") % (
752 int(args[0]) - 1, args[1])
758 int(args[0]) - 1, args[1])
753 except IOError:
759 except IOError:
754 desc = _("rolling back unknown transaction\n")
760 desc = _("rolling back unknown transaction\n")
755 self.ui.status(desc)
761 self.ui.status(desc)
756 if dryrun:
762 if dryrun:
757 return
763 return
758 transaction.rollback(self.sopener, self.sjoin("undo"),
764 transaction.rollback(self.sopener, self.sjoin("undo"),
759 self.ui.warn)
765 self.ui.warn)
760 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
766 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
761 if os.path.exists(self.join('undo.bookmarks')):
767 if os.path.exists(self.join('undo.bookmarks')):
762 util.rename(self.join('undo.bookmarks'),
768 util.rename(self.join('undo.bookmarks'),
763 self.join('bookmarks'))
769 self.join('bookmarks'))
764 try:
770 try:
765 branch = self.opener.read("undo.branch")
771 branch = self.opener.read("undo.branch")
766 self.dirstate.setbranch(branch)
772 self.dirstate.setbranch(branch)
767 except IOError:
773 except IOError:
768 self.ui.warn(_("named branch could not be reset, "
774 self.ui.warn(_("named branch could not be reset, "
769 "current branch is still: %s\n")
775 "current branch is still: %s\n")
770 % self.dirstate.branch())
776 % self.dirstate.branch())
771 self.invalidate()
777 self.invalidate()
772 self.dirstate.invalidate()
778 self.dirstate.invalidate()
773 self.destroyed()
779 self.destroyed()
774 parents = tuple([p.rev() for p in self.parents()])
780 parents = tuple([p.rev() for p in self.parents()])
775 if len(parents) > 1:
781 if len(parents) > 1:
776 self.ui.status(_("working directory now based on "
782 self.ui.status(_("working directory now based on "
777 "revisions %d and %d\n") % parents)
783 "revisions %d and %d\n") % parents)
778 else:
784 else:
779 self.ui.status(_("working directory now based on "
785 self.ui.status(_("working directory now based on "
780 "revision %d\n") % parents)
786 "revision %d\n") % parents)
781 else:
787 else:
782 self.ui.warn(_("no rollback information available\n"))
788 self.ui.warn(_("no rollback information available\n"))
783 return 1
789 return 1
784 finally:
790 finally:
785 release(lock, wlock)
791 release(lock, wlock)
786
792
787 def invalidatecaches(self):
793 def invalidatecaches(self):
788 self._tags = None
794 self._tags = None
789 self._tagtypes = None
795 self._tagtypes = None
790 self.nodetagscache = None
796 self.nodetagscache = None
791 self._branchcache = None # in UTF-8
797 self._branchcache = None # in UTF-8
792 self._branchcachetip = None
798 self._branchcachetip = None
793
799
794 def invalidate(self):
800 def invalidate(self):
795 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
801 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
796 if a in self.__dict__:
802 if a in self.__dict__:
797 delattr(self, a)
803 delattr(self, a)
798 self.invalidatecaches()
804 self.invalidatecaches()
799
805
800 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
806 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
801 try:
807 try:
802 l = lock.lock(lockname, 0, releasefn, desc=desc)
808 l = lock.lock(lockname, 0, releasefn, desc=desc)
803 except error.LockHeld, inst:
809 except error.LockHeld, inst:
804 if not wait:
810 if not wait:
805 raise
811 raise
806 self.ui.warn(_("waiting for lock on %s held by %r\n") %
812 self.ui.warn(_("waiting for lock on %s held by %r\n") %
807 (desc, inst.locker))
813 (desc, inst.locker))
808 # default to 600 seconds timeout
814 # default to 600 seconds timeout
809 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
815 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
810 releasefn, desc=desc)
816 releasefn, desc=desc)
811 if acquirefn:
817 if acquirefn:
812 acquirefn()
818 acquirefn()
813 return l
819 return l
814
820
815 def lock(self, wait=True):
821 def lock(self, wait=True):
816 '''Lock the repository store (.hg/store) and return a weak reference
822 '''Lock the repository store (.hg/store) and return a weak reference
817 to the lock. Use this before modifying the store (e.g. committing or
823 to the lock. Use this before modifying the store (e.g. committing or
818 stripping). If you are opening a transaction, get a lock as well.)'''
824 stripping). If you are opening a transaction, get a lock as well.)'''
819 l = self._lockref and self._lockref()
825 l = self._lockref and self._lockref()
820 if l is not None and l.held:
826 if l is not None and l.held:
821 l.lock()
827 l.lock()
822 return l
828 return l
823
829
824 l = self._lock(self.sjoin("lock"), wait, self.store.write,
830 l = self._lock(self.sjoin("lock"), wait, self.store.write,
825 self.invalidate, _('repository %s') % self.origroot)
831 self.invalidate, _('repository %s') % self.origroot)
826 self._lockref = weakref.ref(l)
832 self._lockref = weakref.ref(l)
827 return l
833 return l
828
834
829 def wlock(self, wait=True):
835 def wlock(self, wait=True):
830 '''Lock the non-store parts of the repository (everything under
836 '''Lock the non-store parts of the repository (everything under
831 .hg except .hg/store) and return a weak reference to the lock.
837 .hg except .hg/store) and return a weak reference to the lock.
832 Use this before modifying files in .hg.'''
838 Use this before modifying files in .hg.'''
833 l = self._wlockref and self._wlockref()
839 l = self._wlockref and self._wlockref()
834 if l is not None and l.held:
840 if l is not None and l.held:
835 l.lock()
841 l.lock()
836 return l
842 return l
837
843
838 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
844 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
839 self.dirstate.invalidate, _('working directory of %s') %
845 self.dirstate.invalidate, _('working directory of %s') %
840 self.origroot)
846 self.origroot)
841 self._wlockref = weakref.ref(l)
847 self._wlockref = weakref.ref(l)
842 return l
848 return l
843
849
844 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
850 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
845 """
851 """
846 commit an individual file as part of a larger transaction
852 commit an individual file as part of a larger transaction
847 """
853 """
848
854
849 fname = fctx.path()
855 fname = fctx.path()
850 text = fctx.data()
856 text = fctx.data()
851 flog = self.file(fname)
857 flog = self.file(fname)
852 fparent1 = manifest1.get(fname, nullid)
858 fparent1 = manifest1.get(fname, nullid)
853 fparent2 = fparent2o = manifest2.get(fname, nullid)
859 fparent2 = fparent2o = manifest2.get(fname, nullid)
854
860
855 meta = {}
861 meta = {}
856 copy = fctx.renamed()
862 copy = fctx.renamed()
857 if copy and copy[0] != fname:
863 if copy and copy[0] != fname:
858 # Mark the new revision of this file as a copy of another
864 # Mark the new revision of this file as a copy of another
859 # file. This copy data will effectively act as a parent
865 # file. This copy data will effectively act as a parent
860 # of this new revision. If this is a merge, the first
866 # of this new revision. If this is a merge, the first
861 # parent will be the nullid (meaning "look up the copy data")
867 # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
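        # A minimal usage sketch (editorial illustration, not part of the
        # original module; assumes "repo" was opened elsewhere, e.g. via
        # hg.repository(ui, path)):
        #
        #   node = repo.commit(text="fix parser", user="alice <a@example.org>")
        #   if node is None:
        #       repo.ui.status("nothing changed\n")
        #
        # commit() returns the new changeset node, or None when there is
        # nothing to commit (see the early return below).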

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
                elif '.hgsub' in changes[2]:
                    # clean up .hgsubstate when .hgsub is removed
                    if ('.hgsubstate' in wctx and
                        '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                        changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
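        # Editorial note: unlike commit() above, this method takes a
        # prebuilt context (a context.workingctx or a context.memctx) and
        # never touches the dirstate; commit() is the usual entry point
        # and calls commitctx() with the workingctx it assembles.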

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
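        # Usage sketch (editorial; the pattern below is an assumption, not
        # taken from this file): walking the working directory for Python
        # sources might look like
        #
        #   m = matchmod.match(repo.root, '', ['glob:**.py'])
        #   for f in repo.walk(m):
        #       repo.ui.write(f + '\n')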

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes, or between a node and
        the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """
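        # Editorial sketch of the 7-tuple this method returns (names are
        # descriptive only):
        #
        #   modified, added, removed, deleted, unknown, ignored, clean = \
        #       repo.status(unknown=True, clean=True)
        #
        # Categories that are not requested (ignored, clean, unknown) come
        # back as empty lists.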

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
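        # Editorial sketch: listing the open heads of the default branch,
        # newest first, assuming an open localrepository in "repo":
        #
        #   for node in repo.branchheads('default'):
        #       repo.ui.write('%s\n' % hex(node))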
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
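        # Editorial note on the loop above: a node is recorded whenever
        # i == f and f then doubles, so each list samples the first-parent
        # chain from top toward bottom at distances 1, 2, 4, 8, ... This
        # keeps the answer O(log n) nodes per pair, which is what the
        # legacy "between" discovery command relies on.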

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass
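        # Editorial sketch of such an override (hypothetical extension
        # code; mq, for instance, hooks checkpush to refuse pushing
        # applied patches):
        #
        #   def reposetup(ui, repo):
        #       class pickyrepo(repo.__class__):
        #           def checkpush(self, force, revs):
        #               super(pickyrepo, self).checkpush(force, revs)
        #               if not force:
        #                   raise util.Abort('pushes disabled here')
        #       repo.__class__ = pickyrepo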

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
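        # Editorial sketch of interpreting the result (assumes "other" is
        # a peer repository obtained via hg.repository):
        #
        #   ret = repo.push(other)
        #   if ret == 0:
        #       repo.ui.warn('push failed or nothing to push\n')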
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
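        # Editorial note: "bases" name revisions the recipient is assumed
        # to have already; everything descending from a base up to the
        # requested heads goes into the bundle. With bases=[nullid] this
        # degenerates to a full changegroup of the ancestors of heads.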
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
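        # Editorial sketch: the wire protocol layer calls this roughly as
        #
        #   cg = repo.getbundle('serve', heads=theirheads, common=knownnodes)
        #
        # where a None return means the other side already has everything
        # it asked for.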
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
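        # Editorial note: the knob read above comes from hgrc, e.g.
        #
        #   [bundle]
        #   reorder = auto      ; or any value util.parsebool understands
        #
        # The None ('auto') case leaves the decision about reordering
        # revisions into DAG order to the revlog grouping code.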

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
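        # Editorial illustration of the convention above (values derived
        # from the dh arithmetic at the end of this method):
        #
        #   old heads  new heads  return
        #       1          1        1    (head count unchanged)
        #       1          3        3    (two heads added: 1 + 2)
        #       3          1       -3    (two heads removed: -1 - 2)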
1713 def csmap(x):
1719 def csmap(x):
1714 self.ui.debug("add changeset %s\n" % short(x))
1720 self.ui.debug("add changeset %s\n" % short(x))
1715 return len(cl)
1721 return len(cl)
1716
1722
1717 def revmap(x):
1723 def revmap(x):
1718 return cl.rev(x)
1724 return cl.rev(x)
1719
1725
1720 if not source:
1726 if not source:
1721 return 0
1727 return 0
1722
1728
1723 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1729 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1724
1730
1725 changesets = files = revisions = 0
1731 changesets = files = revisions = 0
1726 efiles = set()
1732 efiles = set()
1727
1733
1728 # write changelog data to temp files so concurrent readers will not see
1734 # write changelog data to temp files so concurrent readers will not see
1729 # inconsistent view
1735 # inconsistent view
1730 cl = self.changelog
1736 cl = self.changelog
1731 cl.delayupdate()
1737 cl.delayupdate()
1732 oldheads = cl.heads()
1738 oldheads = cl.heads()
1733
1739
1734 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1740 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1735 try:
1741 try:
1736 trp = weakref.proxy(tr)
1742 trp = weakref.proxy(tr)
1737 # pull off the changeset group
1743 # pull off the changeset group
1738 self.ui.status(_("adding changesets\n"))
1744 self.ui.status(_("adding changesets\n"))
1739 clstart = len(cl)
1745 clstart = len(cl)
1740 class prog(object):
1746 class prog(object):
1741 step = _('changesets')
1747 step = _('changesets')
1742 count = 1
1748 count = 1
1743 ui = self.ui
1749 ui = self.ui
1744 total = None
1750 total = None
1745 def __call__(self):
1751 def __call__(self):
1746 self.ui.progress(self.step, self.count, unit=_('chunks'),
1752 self.ui.progress(self.step, self.count, unit=_('chunks'),
1747 total=self.total)
1753 total=self.total)
1748 self.count += 1
1754 self.count += 1
1749 pr = prog()
1755 pr = prog()
1750 source.callback = pr
1756 source.callback = pr
1751
1757
1752 source.changelogheader()
1758 source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

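A note on the tail of addchangegroup above: the return value deliberately never equals 0, so a caller can treat zero as "not run" while the sign and magnitude still encode dh, the closed-head-adjusted change in head count. A minimal sketch of how a caller could decode it; the helper name is hypothetical, not Mercurial API:

def decodeaddgroup(ret):
    # inverse of the 'never return 0' encoding above
    assert ret != 0
    if ret > 0:
        return ret - 1   # dh >= 0: heads added (or unchanged)
    return ret + 1       # dh < 0: heads removed
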
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

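The stream_out payload parsed by stream_in is line-oriented: a status-code line (0 ok, 1 forbidden, 2 remote lock failed), then a "total_files total_bytes" line, then per file a "name\0size" line followed by exactly size bytes of store data. A self-contained parsing sketch under those assumptions (helper name hypothetical; a real consumer streams in chunks, as util.filechunkiter does above):

def readstreambody(fp):
    # assumes the status-code line has already been read and checked
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    entries = []
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        size = int(size)
        entries.append((name, fp.read(size)))  # exactly `size` bytes follow
    return total_bytes, entries
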
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

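Condensing the negotiation in clone above: streaming is attempted only for full clones, via either the old 'stream' capability (which implies revlogv1) or 'streamreqs', and only when the client supports every format the server advertises; anything else degrades to pull. A restatement as a predicate, with an illustrative helper name:

def streamrequirements(remote, heads, stream, supportedformats):
    # returns the requirement set to pass to stream_in, or None to pull
    if not stream or heads:
        return None
    if remote.capable('stream'):
        return set(('revlogv1',))
    streamreqs = remote.capable('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        if not reqs - supportedformats:
            return reqs
    return None
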
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

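pushkey is a generic key/value exchange wrapped in pre/post hooks; in this era bookmarks are one of the namespaces carried over it. An illustrative (not prescriptive) use, with made-up key and node values:

# old == '' means the key is being created in this repository
ok = repo.pushkey('bookmarks', 'feature-x', '', '1' * 40)
marks = repo.listkeys('bookmarks')   # {bookmark name: hex node}
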
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

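savecommitmessage writes the text to .hg/last-message.txt and returns a user-displayable path relative to the current directory, so an aborted commit can tell the user where the message survived. For example (the exact output depends on the cwd):

relpath = repo.savecommitmessage('WIP: draft commit message\n')
# relpath is typically '.hg/last-message.txt'
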
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

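aftertrans returns a plain closure over the rename list precisely so the transaction can hold it without a reference cycle back into the repository, and undoname (below) computes the matching journal -> undo target. A sketch of how the two fit together (paths illustrative; normally the transaction machinery invokes the closure):

journal = '/repo/.hg/store/journal'
undo = undoname(journal)              # '/repo/.hg/store/undo'
after = aftertrans([(journal, undo)])
after()                               # util.rename(journal, undo)
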
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,141 +1,142 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            if hasattr(f, 'getcode'):
                # python 2.6+
                code = f.getcode()
            elif hasattr(f, 'code'):
                # undocumented attribute, seems to be set in 2.4 and 2.5
                code = f.code
            else:
                # Don't know how to check, hope for the best.
                code = 206
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def __iter__(self):
        return iter(self.read().splitlines(1))
    def close(self):
        pass

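Each httprangereader.read issues one HTTP request whose Range header is computed from the current position, so seeking is free and only the requested bytes travel over the wire. A usage sketch (the URL is illustrative):

import urllib2
import byterange                      # Mercurial's range-handler module

op = urllib2.build_opener(byterange.HTTPRangeHandler())
rdr = httprangereader('http://example.com/repo/.hg/requires', op)
rdr.seek(16)
data = rdr.read(64)                   # sends "Range: bytes=16-79"
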
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpopener(scmutil.abstractopener):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode="r", atomictemp=None):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urllib.quote(path)))
            return httprangereader(f, urlopener)

    return statichttpopener

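build_opener returns a class rather than an instance so the caller can construct openers for different base paths, while the single range-capable urlopener is captured by closure and shared by every file handle. A usage sketch (arguments illustrative; see statichttprepository.__init__ below for the real call site):

openercls = build_opener(ui, authinfo)
op = openercls('http://example.com/repo/.hg')
fp = op('requires')                   # ranged reads of .../.hg/requires
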
class statichttprepository(localrepo.localrepository):
    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)

        try:
            requirements = scmutil.readrequires(self.opener, self.supported)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.opener("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        self._tags = None
        self.nodetagscache = None
        self._branchcache = None
        self._branchcachetip = None
        self.encodepats = None
        self.decodepats = None
        self.capabilities.difference_update(["pushkey"])
        self._filecache = {}

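The one line this changeset adds here is self._filecache = {} (the file grows from 141 to 142 lines): statichttprepository does not run localrepository.__init__, so it must initialize the new stat-info cache for files under .hg/ itself; over read-only static HTTP nothing can be stat()ed, so the dict simply stays empty. As a sketch of the idea behind such a cache (shape and names are assumptions for illustration, not Mercurial's actual implementation):

import os

class filecacheentry(object):
    # cache a computed value keyed by a file's stat info; recompute
    # only when (mtime, size) changes on disk
    def __init__(self, path, compute):
        self.path = path
        self.compute = compute
        self.sig = None
        self.value = None

    def get(self):
        st = os.stat(self.path)
        sig = (st.st_mtime, st.st_size)
        if sig != self.sig:
            self.sig = sig
            self.value = self.compute(self.path)
        return self.value
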
    def url(self):
        return self._url

    def local(self):
        return False

    def lock(self, wait=True):
        raise util.Abort(_('cannot lock static-http repository'))

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
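instance strips the first seven characters of the path, i.e. the 'static-' prefix of a 'static-http://...' URL, leaving an ordinary http URL for the range-request machinery; hg's scheme dispatcher is what routes such paths to this module. The string arithmetic, concretely:

path = 'static-http://example.com/repo'
assert path[7:] == 'http://example.com/repo'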