repository: drop unused rjoin() method...
Patrick Mezard
r12035:ff104423 default
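
Context for the change: join() resolves a path against the .hg directory and wjoin() against the working directory root, while the dropped rjoin() first passed the path through util.pconvert() (which normalizes platform path separators to '/') before joining it onto the root. A minimal usage sketch follows; the paths are illustrative, not from the changeset:

    repo.join('hgrc')        # -> <root>/.hg/hgrc
    repo.wjoin('foo/bar')    # -> <root>/foo/bar
    # rjoin('foo\\bar') would have returned
    # os.path.join(repo.root, util.pconvert('foo\\bar')), i.e. the
    # separator-normalized equivalent; since no callers remained in the
    # tree, the method is dropped in the diff below.
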
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1806 +1,1803 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.baseui = baseui
33 self.baseui = baseui
34 self.ui = baseui.copy()
34 self.ui = baseui.copy()
35
35
36 try:
36 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
38 extensions.loadall(self.ui)
39 except IOError:
39 except IOError:
40 pass
40 pass
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 util.makedirs(path)
45 util.makedirs(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 requirements = ["revlogv1"]
47 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
48 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
49 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
50 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
51 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
52 requirements.append("fncache")
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 if self.ui.configbool('format', 'parentdelta', False):
58 if self.ui.configbool('format', 'parentdelta', False):
59 requirements.append("parentdelta")
59 requirements.append("parentdelta")
60 reqfile = self.opener("requires", "w")
60 reqfile = self.opener("requires", "w")
61 for r in requirements:
61 for r in requirements:
62 reqfile.write("%s\n" % r)
62 reqfile.write("%s\n" % r)
63 reqfile.close()
63 reqfile.close()
64 else:
64 else:
65 raise error.RepoError(_("repository %s not found") % path)
65 raise error.RepoError(_("repository %s not found") % path)
66 elif create:
66 elif create:
67 raise error.RepoError(_("repository %s already exists") % path)
67 raise error.RepoError(_("repository %s already exists") % path)
68 else:
68 else:
69 # find requirements
69 # find requirements
70 requirements = set()
70 requirements = set()
71 try:
71 try:
72 requirements = set(self.opener("requires").read().splitlines())
72 requirements = set(self.opener("requires").read().splitlines())
73 except IOError, inst:
73 except IOError, inst:
74 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
75 raise
75 raise
76 for r in requirements - self.supported:
76 for r in requirements - self.supported:
77 raise error.RepoError(_("requirement '%s' not supported") % r)
77 raise error.RepoError(_("requirement '%s' not supported") % r)
78
78
79 self.sharedpath = self.path
79 self.sharedpath = self.path
80 try:
80 try:
81 s = os.path.realpath(self.opener("sharedpath").read())
81 s = os.path.realpath(self.opener("sharedpath").read())
82 if not os.path.exists(s):
82 if not os.path.exists(s):
83 raise error.RepoError(
83 raise error.RepoError(
84 _('.hg/sharedpath points to nonexistent directory %s') % s)
84 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 self.sharedpath = s
85 self.sharedpath = s
86 except IOError, inst:
86 except IOError, inst:
87 if inst.errno != errno.ENOENT:
87 if inst.errno != errno.ENOENT:
88 raise
88 raise
89
89
90 self.store = store.store(requirements, self.sharedpath, util.opener)
90 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.spath = self.store.path
91 self.spath = self.store.path
92 self.sopener = self.store.opener
92 self.sopener = self.store.opener
93 self.sjoin = self.store.join
93 self.sjoin = self.store.join
94 self.opener.createmode = self.store.createmode
94 self.opener.createmode = self.store.createmode
95 self.sopener.options = {}
95 self.sopener.options = {}
96 if 'parentdelta' in requirements:
96 if 'parentdelta' in requirements:
97 self.sopener.options['parentdelta'] = 1
97 self.sopener.options['parentdelta'] = 1
98
98
99 # These two define the set of tags for this repository. _tags
99 # These two define the set of tags for this repository. _tags
100 # maps tag name to node; _tagtypes maps tag name to 'global' or
100 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # 'local'. (Global tags are defined by .hgtags across all
101 # 'local'. (Global tags are defined by .hgtags across all
102 # heads, and local tags are defined in .hg/localtags.) They
102 # heads, and local tags are defined in .hg/localtags.) They
103 # constitute the in-memory cache of tags.
103 # constitute the in-memory cache of tags.
104 self._tags = None
104 self._tags = None
105 self._tagtypes = None
105 self._tagtypes = None
106
106
107 self._branchcache = None # in UTF-8
107 self._branchcache = None # in UTF-8
108 self._branchcachetip = None
108 self._branchcachetip = None
109 self.nodetagscache = None
109 self.nodetagscache = None
110 self.filterpats = {}
110 self.filterpats = {}
111 self._datafilters = {}
111 self._datafilters = {}
112 self._transref = self._lockref = self._wlockref = None
112 self._transref = self._lockref = self._wlockref = None
113
113
114 @propertycache
114 @propertycache
115 def changelog(self):
115 def changelog(self):
116 c = changelog.changelog(self.sopener)
116 c = changelog.changelog(self.sopener)
117 if 'HG_PENDING' in os.environ:
117 if 'HG_PENDING' in os.environ:
118 p = os.environ['HG_PENDING']
118 p = os.environ['HG_PENDING']
119 if p.startswith(self.root):
119 if p.startswith(self.root):
120 c.readpending('00changelog.i.a')
120 c.readpending('00changelog.i.a')
121 self.sopener.options['defversion'] = c.version
121 self.sopener.options['defversion'] = c.version
122 return c
122 return c
123
123
124 @propertycache
124 @propertycache
125 def manifest(self):
125 def manifest(self):
126 return manifest.manifest(self.sopener)
126 return manifest.manifest(self.sopener)
127
127
128 @propertycache
128 @propertycache
129 def dirstate(self):
129 def dirstate(self):
130 return dirstate.dirstate(self.opener, self.ui, self.root)
130 return dirstate.dirstate(self.opener, self.ui, self.root)
131
131
132 def __getitem__(self, changeid):
132 def __getitem__(self, changeid):
133 if changeid is None:
133 if changeid is None:
134 return context.workingctx(self)
134 return context.workingctx(self)
135 return context.changectx(self, changeid)
135 return context.changectx(self, changeid)
136
136
137 def __contains__(self, changeid):
137 def __contains__(self, changeid):
138 try:
138 try:
139 return bool(self.lookup(changeid))
139 return bool(self.lookup(changeid))
140 except error.RepoLookupError:
140 except error.RepoLookupError:
141 return False
141 return False
142
142
143 def __nonzero__(self):
143 def __nonzero__(self):
144 return True
144 return True
145
145
146 def __len__(self):
146 def __len__(self):
147 return len(self.changelog)
147 return len(self.changelog)
148
148
149 def __iter__(self):
149 def __iter__(self):
150 for i in xrange(len(self)):
150 for i in xrange(len(self)):
151 yield i
151 yield i
152
152
153 def url(self):
153 def url(self):
154 return 'file:' + self.root
154 return 'file:' + self.root
155
155
156 def hook(self, name, throw=False, **args):
156 def hook(self, name, throw=False, **args):
157 return hook.hook(self.ui, self, name, throw, **args)
157 return hook.hook(self.ui, self, name, throw, **args)
158
158
159 tag_disallowed = ':\r\n'
159 tag_disallowed = ':\r\n'
160
160
161 def _tag(self, names, node, message, local, user, date, extra={}):
161 def _tag(self, names, node, message, local, user, date, extra={}):
162 if isinstance(names, str):
162 if isinstance(names, str):
163 allchars = names
163 allchars = names
164 names = (names,)
164 names = (names,)
165 else:
165 else:
166 allchars = ''.join(names)
166 allchars = ''.join(names)
167 for c in self.tag_disallowed:
167 for c in self.tag_disallowed:
168 if c in allchars:
168 if c in allchars:
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
170
170
171 branches = self.branchmap()
171 branches = self.branchmap()
172 for name in names:
172 for name in names:
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
174 local=local)
174 local=local)
175 if name in branches:
175 if name in branches:
176 self.ui.warn(_("warning: tag %s conflicts with existing"
176 self.ui.warn(_("warning: tag %s conflicts with existing"
177 " branch name\n") % name)
177 " branch name\n") % name)
178
178
179 def writetags(fp, names, munge, prevtags):
179 def writetags(fp, names, munge, prevtags):
180 fp.seek(0, 2)
180 fp.seek(0, 2)
181 if prevtags and prevtags[-1] != '\n':
181 if prevtags and prevtags[-1] != '\n':
182 fp.write('\n')
182 fp.write('\n')
183 for name in names:
183 for name in names:
184 m = munge and munge(name) or name
184 m = munge and munge(name) or name
185 if self._tagtypes and name in self._tagtypes:
185 if self._tagtypes and name in self._tagtypes:
186 old = self._tags.get(name, nullid)
186 old = self._tags.get(name, nullid)
187 fp.write('%s %s\n' % (hex(old), m))
187 fp.write('%s %s\n' % (hex(old), m))
188 fp.write('%s %s\n' % (hex(node), m))
188 fp.write('%s %s\n' % (hex(node), m))
189 fp.close()
189 fp.close()
190
190
191 prevtags = ''
191 prevtags = ''
192 if local:
192 if local:
193 try:
193 try:
194 fp = self.opener('localtags', 'r+')
194 fp = self.opener('localtags', 'r+')
195 except IOError:
195 except IOError:
196 fp = self.opener('localtags', 'a')
196 fp = self.opener('localtags', 'a')
197 else:
197 else:
198 prevtags = fp.read()
198 prevtags = fp.read()
199
199
200 # local tags are stored in the current charset
200 # local tags are stored in the current charset
201 writetags(fp, names, None, prevtags)
201 writetags(fp, names, None, prevtags)
202 for name in names:
202 for name in names:
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
204 return
205
205
206 try:
206 try:
207 fp = self.wfile('.hgtags', 'rb+')
207 fp = self.wfile('.hgtags', 'rb+')
208 except IOError:
208 except IOError:
209 fp = self.wfile('.hgtags', 'ab')
209 fp = self.wfile('.hgtags', 'ab')
210 else:
210 else:
211 prevtags = fp.read()
211 prevtags = fp.read()
212
212
213 # committed tags are stored in UTF-8
213 # committed tags are stored in UTF-8
214 writetags(fp, names, encoding.fromlocal, prevtags)
214 writetags(fp, names, encoding.fromlocal, prevtags)
215
215
216 if '.hgtags' not in self.dirstate:
216 if '.hgtags' not in self.dirstate:
217 self[None].add(['.hgtags'])
217 self[None].add(['.hgtags'])
218
218
219 m = matchmod.exact(self.root, '', ['.hgtags'])
219 m = matchmod.exact(self.root, '', ['.hgtags'])
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
221
221
222 for name in names:
222 for name in names:
223 self.hook('tag', node=hex(node), tag=name, local=local)
223 self.hook('tag', node=hex(node), tag=name, local=local)
224
224
225 return tagnode
225 return tagnode
226
226
227 def tag(self, names, node, message, local, user, date):
227 def tag(self, names, node, message, local, user, date):
228 '''tag a revision with one or more symbolic names.
228 '''tag a revision with one or more symbolic names.
229
229
230 names is a list of strings or, when adding a single tag, names may be a
230 names is a list of strings or, when adding a single tag, names may be a
231 string.
231 string.
232
232
233 if local is True, the tags are stored in a per-repository file.
233 if local is True, the tags are stored in a per-repository file.
234 otherwise, they are stored in the .hgtags file, and a new
234 otherwise, they are stored in the .hgtags file, and a new
235 changeset is committed with the change.
235 changeset is committed with the change.
236
236
237 keyword arguments:
237 keyword arguments:
238
238
239 local: whether to store tags in non-version-controlled file
239 local: whether to store tags in non-version-controlled file
240 (default False)
240 (default False)
241
241
242 message: commit message to use if committing
242 message: commit message to use if committing
243
243
244 user: name of user to use if committing
244 user: name of user to use if committing
245
245
246 date: date tuple to use if committing'''
246 date: date tuple to use if committing'''
247
247
248 for x in self.status()[:5]:
248 for x in self.status()[:5]:
249 if '.hgtags' in x:
249 if '.hgtags' in x:
250 raise util.Abort(_('working copy of .hgtags is changed '
250 raise util.Abort(_('working copy of .hgtags is changed '
251 '(please commit .hgtags manually)'))
251 '(please commit .hgtags manually)'))
252
252
253 self.tags() # instantiate the cache
253 self.tags() # instantiate the cache
254 self._tag(names, node, message, local, user, date)
254 self._tag(names, node, message, local, user, date)
255
255
256 def tags(self):
256 def tags(self):
257 '''return a mapping of tag to node'''
257 '''return a mapping of tag to node'''
258 if self._tags is None:
258 if self._tags is None:
259 (self._tags, self._tagtypes) = self._findtags()
259 (self._tags, self._tagtypes) = self._findtags()
260
260
261 return self._tags
261 return self._tags
262
262
263 def _findtags(self):
263 def _findtags(self):
264 '''Do the hard work of finding tags. Return a pair of dicts
264 '''Do the hard work of finding tags. Return a pair of dicts
265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
266 maps tag name to a string like \'global\' or \'local\'.
266 maps tag name to a string like \'global\' or \'local\'.
267 Subclasses or extensions are free to add their own tags, but
267 Subclasses or extensions are free to add their own tags, but
268 should be aware that the returned dicts will be retained for the
268 should be aware that the returned dicts will be retained for the
269 duration of the localrepo object.'''
269 duration of the localrepo object.'''
270
270
271 # XXX what tagtype should subclasses/extensions use? Currently
271 # XXX what tagtype should subclasses/extensions use? Currently
272 # mq and bookmarks add tags, but do not set the tagtype at all.
272 # mq and bookmarks add tags, but do not set the tagtype at all.
273 # Should each extension invent its own tag type? Should there
273 # Should each extension invent its own tag type? Should there
274 # be one tagtype for all such "virtual" tags? Or is the status
274 # be one tagtype for all such "virtual" tags? Or is the status
275 # quo fine?
275 # quo fine?
276
276
277 alltags = {} # map tag name to (node, hist)
277 alltags = {} # map tag name to (node, hist)
278 tagtypes = {}
278 tagtypes = {}
279
279
280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
282
282
283 # Build the return dicts. Have to re-encode tag names because
283 # Build the return dicts. Have to re-encode tag names because
284 # the tags module always uses UTF-8 (in order not to lose info
284 # the tags module always uses UTF-8 (in order not to lose info
285 # writing to the cache), but the rest of Mercurial wants them in
285 # writing to the cache), but the rest of Mercurial wants them in
286 # local encoding.
286 # local encoding.
287 tags = {}
287 tags = {}
288 for (name, (node, hist)) in alltags.iteritems():
288 for (name, (node, hist)) in alltags.iteritems():
289 if node != nullid:
289 if node != nullid:
290 tags[encoding.tolocal(name)] = node
290 tags[encoding.tolocal(name)] = node
291 tags['tip'] = self.changelog.tip()
291 tags['tip'] = self.changelog.tip()
292 tagtypes = dict([(encoding.tolocal(name), value)
292 tagtypes = dict([(encoding.tolocal(name), value)
293 for (name, value) in tagtypes.iteritems()])
293 for (name, value) in tagtypes.iteritems()])
294 return (tags, tagtypes)
294 return (tags, tagtypes)
295
295
296 def tagtype(self, tagname):
296 def tagtype(self, tagname):
297 '''
297 '''
298 return the type of the given tag. result can be:
298 return the type of the given tag. result can be:
299
299
300 'local' : a local tag
300 'local' : a local tag
301 'global' : a global tag
301 'global' : a global tag
302 None : tag does not exist
302 None : tag does not exist
303 '''
303 '''
304
304
305 self.tags()
305 self.tags()
306
306
307 return self._tagtypes.get(tagname)
307 return self._tagtypes.get(tagname)
308
308
309 def tagslist(self):
309 def tagslist(self):
310 '''return a list of tags ordered by revision'''
310 '''return a list of tags ordered by revision'''
311 l = []
311 l = []
312 for t, n in self.tags().iteritems():
312 for t, n in self.tags().iteritems():
313 try:
313 try:
314 r = self.changelog.rev(n)
314 r = self.changelog.rev(n)
315 except:
315 except:
316 r = -2 # sort to the beginning of the list if unknown
316 r = -2 # sort to the beginning of the list if unknown
317 l.append((r, t, n))
317 l.append((r, t, n))
318 return [(t, n) for r, t, n in sorted(l)]
318 return [(t, n) for r, t, n in sorted(l)]
319
319
320 def nodetags(self, node):
320 def nodetags(self, node):
321 '''return the tags associated with a node'''
321 '''return the tags associated with a node'''
322 if not self.nodetagscache:
322 if not self.nodetagscache:
323 self.nodetagscache = {}
323 self.nodetagscache = {}
324 for t, n in self.tags().iteritems():
324 for t, n in self.tags().iteritems():
325 self.nodetagscache.setdefault(n, []).append(t)
325 self.nodetagscache.setdefault(n, []).append(t)
326 for tags in self.nodetagscache.itervalues():
326 for tags in self.nodetagscache.itervalues():
327 tags.sort()
327 tags.sort()
328 return self.nodetagscache.get(node, [])
328 return self.nodetagscache.get(node, [])
329
329
330 def _branchtags(self, partial, lrev):
330 def _branchtags(self, partial, lrev):
331 # TODO: rename this function?
331 # TODO: rename this function?
332 tiprev = len(self) - 1
332 tiprev = len(self) - 1
333 if lrev != tiprev:
333 if lrev != tiprev:
334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
335 self._updatebranchcache(partial, ctxgen)
335 self._updatebranchcache(partial, ctxgen)
336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
337
337
338 return partial
338 return partial
339
339
340 def branchmap(self):
340 def branchmap(self):
341 '''returns a dictionary {branch: [branchheads]}'''
341 '''returns a dictionary {branch: [branchheads]}'''
342 tip = self.changelog.tip()
342 tip = self.changelog.tip()
343 if self._branchcache is not None and self._branchcachetip == tip:
343 if self._branchcache is not None and self._branchcachetip == tip:
344 return self._branchcache
344 return self._branchcache
345
345
346 oldtip = self._branchcachetip
346 oldtip = self._branchcachetip
347 self._branchcachetip = tip
347 self._branchcachetip = tip
348 if oldtip is None or oldtip not in self.changelog.nodemap:
348 if oldtip is None or oldtip not in self.changelog.nodemap:
349 partial, last, lrev = self._readbranchcache()
349 partial, last, lrev = self._readbranchcache()
350 else:
350 else:
351 lrev = self.changelog.rev(oldtip)
351 lrev = self.changelog.rev(oldtip)
352 partial = self._branchcache
352 partial = self._branchcache
353
353
354 self._branchtags(partial, lrev)
354 self._branchtags(partial, lrev)
355 # this private cache holds all heads (not just tips)
355 # this private cache holds all heads (not just tips)
356 self._branchcache = partial
356 self._branchcache = partial
357
357
358 return self._branchcache
358 return self._branchcache
359
359
360 def branchtags(self):
360 def branchtags(self):
361 '''return a dict where branch names map to the tipmost head of
361 '''return a dict where branch names map to the tipmost head of
362 the branch, open heads come before closed'''
362 the branch, open heads come before closed'''
363 bt = {}
363 bt = {}
364 for bn, heads in self.branchmap().iteritems():
364 for bn, heads in self.branchmap().iteritems():
365 tip = heads[-1]
365 tip = heads[-1]
366 for h in reversed(heads):
366 for h in reversed(heads):
367 if 'close' not in self.changelog.read(h)[5]:
367 if 'close' not in self.changelog.read(h)[5]:
368 tip = h
368 tip = h
369 break
369 break
370 bt[bn] = tip
370 bt[bn] = tip
371 return bt
371 return bt
372
372
373
373
374 def _readbranchcache(self):
374 def _readbranchcache(self):
375 partial = {}
375 partial = {}
376 try:
376 try:
377 f = self.opener("branchheads.cache")
377 f = self.opener("branchheads.cache")
378 lines = f.read().split('\n')
378 lines = f.read().split('\n')
379 f.close()
379 f.close()
380 except (IOError, OSError):
380 except (IOError, OSError):
381 return {}, nullid, nullrev
381 return {}, nullid, nullrev
382
382
383 try:
383 try:
384 last, lrev = lines.pop(0).split(" ", 1)
384 last, lrev = lines.pop(0).split(" ", 1)
385 last, lrev = bin(last), int(lrev)
385 last, lrev = bin(last), int(lrev)
386 if lrev >= len(self) or self[lrev].node() != last:
386 if lrev >= len(self) or self[lrev].node() != last:
387 # invalidate the cache
387 # invalidate the cache
388 raise ValueError('invalidating branch cache (tip differs)')
388 raise ValueError('invalidating branch cache (tip differs)')
389 for l in lines:
389 for l in lines:
390 if not l:
390 if not l:
391 continue
391 continue
392 node, label = l.split(" ", 1)
392 node, label = l.split(" ", 1)
393 partial.setdefault(label.strip(), []).append(bin(node))
393 partial.setdefault(label.strip(), []).append(bin(node))
394 except KeyboardInterrupt:
394 except KeyboardInterrupt:
395 raise
395 raise
396 except Exception, inst:
396 except Exception, inst:
397 if self.ui.debugflag:
397 if self.ui.debugflag:
398 self.ui.warn(str(inst), '\n')
398 self.ui.warn(str(inst), '\n')
399 partial, last, lrev = {}, nullid, nullrev
399 partial, last, lrev = {}, nullid, nullrev
400 return partial, last, lrev
400 return partial, last, lrev
401
401
402 def _writebranchcache(self, branches, tip, tiprev):
402 def _writebranchcache(self, branches, tip, tiprev):
403 try:
403 try:
404 f = self.opener("branchheads.cache", "w", atomictemp=True)
404 f = self.opener("branchheads.cache", "w", atomictemp=True)
405 f.write("%s %s\n" % (hex(tip), tiprev))
405 f.write("%s %s\n" % (hex(tip), tiprev))
406 for label, nodes in branches.iteritems():
406 for label, nodes in branches.iteritems():
407 for node in nodes:
407 for node in nodes:
408 f.write("%s %s\n" % (hex(node), label))
408 f.write("%s %s\n" % (hex(node), label))
409 f.rename()
409 f.rename()
410 except (IOError, OSError):
410 except (IOError, OSError):
411 pass
411 pass
412
412
413 def _updatebranchcache(self, partial, ctxgen):
413 def _updatebranchcache(self, partial, ctxgen):
414 # collect new branch entries
414 # collect new branch entries
415 newbranches = {}
415 newbranches = {}
416 for c in ctxgen:
416 for c in ctxgen:
417 newbranches.setdefault(c.branch(), []).append(c.node())
417 newbranches.setdefault(c.branch(), []).append(c.node())
418 # if older branchheads are reachable from new ones, they aren't
418 # if older branchheads are reachable from new ones, they aren't
419 # really branchheads. Note checking parents is insufficient:
419 # really branchheads. Note checking parents is insufficient:
420 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
420 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
421 for branch, newnodes in newbranches.iteritems():
421 for branch, newnodes in newbranches.iteritems():
422 bheads = partial.setdefault(branch, [])
422 bheads = partial.setdefault(branch, [])
423 bheads.extend(newnodes)
423 bheads.extend(newnodes)
424 if len(bheads) <= 1:
424 if len(bheads) <= 1:
425 continue
425 continue
426 # starting from tip means fewer passes over reachable
426 # starting from tip means fewer passes over reachable
427 while newnodes:
427 while newnodes:
428 latest = newnodes.pop()
428 latest = newnodes.pop()
429 if latest not in bheads:
429 if latest not in bheads:
430 continue
430 continue
431 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
431 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
432 reachable = self.changelog.reachable(latest, minbhrev)
432 reachable = self.changelog.reachable(latest, minbhrev)
433 reachable.remove(latest)
433 reachable.remove(latest)
434 bheads = [b for b in bheads if b not in reachable]
434 bheads = [b for b in bheads if b not in reachable]
435 partial[branch] = bheads
435 partial[branch] = bheads
436
436
437 def lookup(self, key):
437 def lookup(self, key):
438 if isinstance(key, int):
438 if isinstance(key, int):
439 return self.changelog.node(key)
439 return self.changelog.node(key)
440 elif key == '.':
440 elif key == '.':
441 return self.dirstate.parents()[0]
441 return self.dirstate.parents()[0]
442 elif key == 'null':
442 elif key == 'null':
443 return nullid
443 return nullid
444 elif key == 'tip':
444 elif key == 'tip':
445 return self.changelog.tip()
445 return self.changelog.tip()
446 n = self.changelog._match(key)
446 n = self.changelog._match(key)
447 if n:
447 if n:
448 return n
448 return n
449 if key in self.tags():
449 if key in self.tags():
450 return self.tags()[key]
450 return self.tags()[key]
451 if key in self.branchtags():
451 if key in self.branchtags():
452 return self.branchtags()[key]
452 return self.branchtags()[key]
453 n = self.changelog._partialmatch(key)
453 n = self.changelog._partialmatch(key)
454 if n:
454 if n:
455 return n
455 return n
456
456
457 # can't find key, check if it might have come from damaged dirstate
457 # can't find key, check if it might have come from damaged dirstate
458 if key in self.dirstate.parents():
458 if key in self.dirstate.parents():
459 raise error.Abort(_("working directory has unknown parent '%s'!")
459 raise error.Abort(_("working directory has unknown parent '%s'!")
460 % short(key))
460 % short(key))
461 try:
461 try:
462 if len(key) == 20:
462 if len(key) == 20:
463 key = hex(key)
463 key = hex(key)
464 except:
464 except:
465 pass
465 pass
466 raise error.RepoLookupError(_("unknown revision '%s'") % key)
466 raise error.RepoLookupError(_("unknown revision '%s'") % key)
467
467
468 def lookupbranch(self, key, remote=None):
468 def lookupbranch(self, key, remote=None):
469 repo = remote or self
469 repo = remote or self
470 if key in repo.branchmap():
470 if key in repo.branchmap():
471 return key
471 return key
472
472
473 repo = (remote and remote.local()) and remote or self
473 repo = (remote and remote.local()) and remote or self
474 return repo[key].branch()
474 return repo[key].branch()
475
475
476 def local(self):
476 def local(self):
477 return True
477 return True
478
478
479 def join(self, f):
479 def join(self, f):
480 return os.path.join(self.path, f)
480 return os.path.join(self.path, f)
481
481
482 def wjoin(self, f):
482 def wjoin(self, f):
483 return os.path.join(self.root, f)
483 return os.path.join(self.root, f)
484
484
485 def rjoin(self, f):
486 return os.path.join(self.root, util.pconvert(f))
487
488 def file(self, f):
485 def file(self, f):
489 if f[0] == '/':
486 if f[0] == '/':
490 f = f[1:]
487 f = f[1:]
491 return filelog.filelog(self.sopener, f)
488 return filelog.filelog(self.sopener, f)
492
489
493 def changectx(self, changeid):
490 def changectx(self, changeid):
494 return self[changeid]
491 return self[changeid]
495
492
496 def parents(self, changeid=None):
493 def parents(self, changeid=None):
497 '''get list of changectxs for parents of changeid'''
494 '''get list of changectxs for parents of changeid'''
498 return self[changeid].parents()
495 return self[changeid].parents()
499
496
500 def filectx(self, path, changeid=None, fileid=None):
497 def filectx(self, path, changeid=None, fileid=None):
501 """changeid can be a changeset revision, node, or tag.
498 """changeid can be a changeset revision, node, or tag.
502 fileid can be a file revision or node."""
499 fileid can be a file revision or node."""
503 return context.filectx(self, path, changeid, fileid)
500 return context.filectx(self, path, changeid, fileid)
504
501
505 def getcwd(self):
502 def getcwd(self):
506 return self.dirstate.getcwd()
503 return self.dirstate.getcwd()
507
504
508 def pathto(self, f, cwd=None):
505 def pathto(self, f, cwd=None):
509 return self.dirstate.pathto(f, cwd)
506 return self.dirstate.pathto(f, cwd)
510
507
511 def wfile(self, f, mode='r'):
508 def wfile(self, f, mode='r'):
512 return self.wopener(f, mode)
509 return self.wopener(f, mode)
513
510
514 def _link(self, f):
511 def _link(self, f):
515 return os.path.islink(self.wjoin(f))
512 return os.path.islink(self.wjoin(f))
516
513
517 def _loadfilter(self, filter):
514 def _loadfilter(self, filter):
518 if filter not in self.filterpats:
515 if filter not in self.filterpats:
519 l = []
516 l = []
520 for pat, cmd in self.ui.configitems(filter):
517 for pat, cmd in self.ui.configitems(filter):
521 if cmd == '!':
518 if cmd == '!':
522 continue
519 continue
523 mf = matchmod.match(self.root, '', [pat])
520 mf = matchmod.match(self.root, '', [pat])
524 fn = None
521 fn = None
525 params = cmd
522 params = cmd
526 for name, filterfn in self._datafilters.iteritems():
523 for name, filterfn in self._datafilters.iteritems():
527 if cmd.startswith(name):
524 if cmd.startswith(name):
528 fn = filterfn
525 fn = filterfn
529 params = cmd[len(name):].lstrip()
526 params = cmd[len(name):].lstrip()
530 break
527 break
531 if not fn:
528 if not fn:
532 fn = lambda s, c, **kwargs: util.filter(s, c)
529 fn = lambda s, c, **kwargs: util.filter(s, c)
533 # Wrap old filters not supporting keyword arguments
530 # Wrap old filters not supporting keyword arguments
534 if not inspect.getargspec(fn)[2]:
531 if not inspect.getargspec(fn)[2]:
535 oldfn = fn
532 oldfn = fn
536 fn = lambda s, c, **kwargs: oldfn(s, c)
533 fn = lambda s, c, **kwargs: oldfn(s, c)
537 l.append((mf, fn, params))
534 l.append((mf, fn, params))
538 self.filterpats[filter] = l
535 self.filterpats[filter] = l
539
536
540 def _filter(self, filter, filename, data):
537 def _filter(self, filter, filename, data):
541 self._loadfilter(filter)
538 self._loadfilter(filter)
542
539
543 for mf, fn, cmd in self.filterpats[filter]:
540 for mf, fn, cmd in self.filterpats[filter]:
544 if mf(filename):
541 if mf(filename):
545 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
542 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
546 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
543 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
547 break
544 break
548
545
549 return data
546 return data
550
547
551 def adddatafilter(self, name, filter):
548 def adddatafilter(self, name, filter):
552 self._datafilters[name] = filter
549 self._datafilters[name] = filter
553
550
554 def wread(self, filename):
551 def wread(self, filename):
555 if self._link(filename):
552 if self._link(filename):
556 data = os.readlink(self.wjoin(filename))
553 data = os.readlink(self.wjoin(filename))
557 else:
554 else:
558 data = self.wopener(filename, 'r').read()
555 data = self.wopener(filename, 'r').read()
559 return self._filter("encode", filename, data)
556 return self._filter("encode", filename, data)
560
557
561 def wwrite(self, filename, data, flags):
558 def wwrite(self, filename, data, flags):
562 data = self._filter("decode", filename, data)
559 data = self._filter("decode", filename, data)
563 try:
560 try:
564 os.unlink(self.wjoin(filename))
561 os.unlink(self.wjoin(filename))
565 except OSError:
562 except OSError:
566 pass
563 pass
567 if 'l' in flags:
564 if 'l' in flags:
568 self.wopener.symlink(data, filename)
565 self.wopener.symlink(data, filename)
569 else:
566 else:
570 self.wopener(filename, 'w').write(data)
567 self.wopener(filename, 'w').write(data)
571 if 'x' in flags:
568 if 'x' in flags:
572 util.set_flags(self.wjoin(filename), False, True)
569 util.set_flags(self.wjoin(filename), False, True)
573
570
574 def wwritedata(self, filename, data):
571 def wwritedata(self, filename, data):
575 return self._filter("decode", filename, data)
572 return self._filter("decode", filename, data)
576
573
577 def transaction(self, desc):
574 def transaction(self, desc):
578 tr = self._transref and self._transref() or None
575 tr = self._transref and self._transref() or None
579 if tr and tr.running():
576 if tr and tr.running():
580 return tr.nest()
577 return tr.nest()
581
578
582 # abort here if the journal already exists
579 # abort here if the journal already exists
583 if os.path.exists(self.sjoin("journal")):
580 if os.path.exists(self.sjoin("journal")):
584 raise error.RepoError(
581 raise error.RepoError(
585 _("abandoned transaction found - run hg recover"))
582 _("abandoned transaction found - run hg recover"))
586
583
587 # save dirstate for rollback
584 # save dirstate for rollback
588 try:
585 try:
589 ds = self.opener("dirstate").read()
586 ds = self.opener("dirstate").read()
590 except IOError:
587 except IOError:
591 ds = ""
588 ds = ""
592 self.opener("journal.dirstate", "w").write(ds)
589 self.opener("journal.dirstate", "w").write(ds)
593 self.opener("journal.branch", "w").write(self.dirstate.branch())
590 self.opener("journal.branch", "w").write(self.dirstate.branch())
594 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
591 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
595
592
596 renames = [(self.sjoin("journal"), self.sjoin("undo")),
593 renames = [(self.sjoin("journal"), self.sjoin("undo")),
597 (self.join("journal.dirstate"), self.join("undo.dirstate")),
594 (self.join("journal.dirstate"), self.join("undo.dirstate")),
598 (self.join("journal.branch"), self.join("undo.branch")),
595 (self.join("journal.branch"), self.join("undo.branch")),
599 (self.join("journal.desc"), self.join("undo.desc"))]
596 (self.join("journal.desc"), self.join("undo.desc"))]
600 tr = transaction.transaction(self.ui.warn, self.sopener,
597 tr = transaction.transaction(self.ui.warn, self.sopener,
601 self.sjoin("journal"),
598 self.sjoin("journal"),
602 aftertrans(renames),
599 aftertrans(renames),
603 self.store.createmode)
600 self.store.createmode)
604 self._transref = weakref.ref(tr)
601 self._transref = weakref.ref(tr)
605 return tr
602 return tr
606
603
607 def recover(self):
604 def recover(self):
608 lock = self.lock()
605 lock = self.lock()
609 try:
606 try:
610 if os.path.exists(self.sjoin("journal")):
607 if os.path.exists(self.sjoin("journal")):
611 self.ui.status(_("rolling back interrupted transaction\n"))
608 self.ui.status(_("rolling back interrupted transaction\n"))
612 transaction.rollback(self.sopener, self.sjoin("journal"),
609 transaction.rollback(self.sopener, self.sjoin("journal"),
613 self.ui.warn)
610 self.ui.warn)
614 self.invalidate()
611 self.invalidate()
615 return True
612 return True
616 else:
613 else:
617 self.ui.warn(_("no interrupted transaction available\n"))
614 self.ui.warn(_("no interrupted transaction available\n"))
618 return False
615 return False
619 finally:
616 finally:
620 lock.release()
617 lock.release()
621
618
622 def rollback(self, dryrun=False):
619 def rollback(self, dryrun=False):
623 wlock = lock = None
620 wlock = lock = None
624 try:
621 try:
625 wlock = self.wlock()
622 wlock = self.wlock()
626 lock = self.lock()
623 lock = self.lock()
627 if os.path.exists(self.sjoin("undo")):
624 if os.path.exists(self.sjoin("undo")):
628 try:
625 try:
629 args = self.opener("undo.desc", "r").read().splitlines()
626 args = self.opener("undo.desc", "r").read().splitlines()
630 if len(args) >= 3 and self.ui.verbose:
627 if len(args) >= 3 and self.ui.verbose:
631 desc = _("rolling back to revision %s"
628 desc = _("rolling back to revision %s"
632 " (undo %s: %s)\n") % (
629 " (undo %s: %s)\n") % (
633 int(args[0]) - 1, args[1], args[2])
630 int(args[0]) - 1, args[1], args[2])
634 elif len(args) >= 2:
631 elif len(args) >= 2:
635 desc = _("rolling back to revision %s (undo %s)\n") % (
632 desc = _("rolling back to revision %s (undo %s)\n") % (
636 int(args[0]) - 1, args[1])
633 int(args[0]) - 1, args[1])
637 except IOError:
634 except IOError:
638 desc = _("rolling back unknown transaction\n")
635 desc = _("rolling back unknown transaction\n")
639 self.ui.status(desc)
636 self.ui.status(desc)
640 if dryrun:
637 if dryrun:
641 return
638 return
642 transaction.rollback(self.sopener, self.sjoin("undo"),
639 transaction.rollback(self.sopener, self.sjoin("undo"),
643 self.ui.warn)
640 self.ui.warn)
644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
641 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
645 try:
642 try:
646 branch = self.opener("undo.branch").read()
643 branch = self.opener("undo.branch").read()
647 self.dirstate.setbranch(branch)
644 self.dirstate.setbranch(branch)
648 except IOError:
645 except IOError:
649 self.ui.warn(_("Named branch could not be reset, "
646 self.ui.warn(_("Named branch could not be reset, "
650 "current branch still is: %s\n")
647 "current branch still is: %s\n")
651 % encoding.tolocal(self.dirstate.branch()))
648 % encoding.tolocal(self.dirstate.branch()))
652 self.invalidate()
649 self.invalidate()
653 self.dirstate.invalidate()
650 self.dirstate.invalidate()
654 self.destroyed()
651 self.destroyed()
655 else:
652 else:
656 self.ui.warn(_("no rollback information available\n"))
653 self.ui.warn(_("no rollback information available\n"))
657 return 1
654 return 1
658 finally:
655 finally:
659 release(lock, wlock)
656 release(lock, wlock)
660
657
661 def invalidatecaches(self):
658 def invalidatecaches(self):
662 self._tags = None
659 self._tags = None
663 self._tagtypes = None
660 self._tagtypes = None
664 self.nodetagscache = None
661 self.nodetagscache = None
665 self._branchcache = None # in UTF-8
662 self._branchcache = None # in UTF-8
666 self._branchcachetip = None
663 self._branchcachetip = None
667
664
668 def invalidate(self):
665 def invalidate(self):
669 for a in "changelog manifest".split():
666 for a in "changelog manifest".split():
670 if a in self.__dict__:
667 if a in self.__dict__:
671 delattr(self, a)
668 delattr(self, a)
672 self.invalidatecaches()
669 self.invalidatecaches()
673
670
674 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
671 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
675 try:
672 try:
676 l = lock.lock(lockname, 0, releasefn, desc=desc)
673 l = lock.lock(lockname, 0, releasefn, desc=desc)
677 except error.LockHeld, inst:
674 except error.LockHeld, inst:
678 if not wait:
675 if not wait:
679 raise
676 raise
680 self.ui.warn(_("waiting for lock on %s held by %r\n") %
677 self.ui.warn(_("waiting for lock on %s held by %r\n") %
681 (desc, inst.locker))
678 (desc, inst.locker))
682 # default to 600 seconds timeout
679 # default to 600 seconds timeout
683 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
680 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
684 releasefn, desc=desc)
681 releasefn, desc=desc)
685 if acquirefn:
682 if acquirefn:
686 acquirefn()
683 acquirefn()
687 return l
684 return l
688
685
689 def lock(self, wait=True):
686 def lock(self, wait=True):
690 '''Lock the repository store (.hg/store) and return a weak reference
687 '''Lock the repository store (.hg/store) and return a weak reference
691 to the lock. Use this before modifying the store (e.g. committing or
688 to the lock. Use this before modifying the store (e.g. committing or
692 stripping). If you are opening a transaction, get a lock as well.)'''
689 stripping). If you are opening a transaction, get a lock as well.)'''
693 l = self._lockref and self._lockref()
690 l = self._lockref and self._lockref()
694 if l is not None and l.held:
691 if l is not None and l.held:
695 l.lock()
692 l.lock()
696 return l
693 return l
697
694
698 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
695 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
699 _('repository %s') % self.origroot)
696 _('repository %s') % self.origroot)
700 self._lockref = weakref.ref(l)
697 self._lockref = weakref.ref(l)
701 return l
698 return l
702
699
703 def wlock(self, wait=True):
700 def wlock(self, wait=True):
704 '''Lock the non-store parts of the repository (everything under
701 '''Lock the non-store parts of the repository (everything under
705 .hg except .hg/store) and return a weak reference to the lock.
702 .hg except .hg/store) and return a weak reference to the lock.
706 Use this before modifying files in .hg.'''
703 Use this before modifying files in .hg.'''
707 l = self._wlockref and self._wlockref()
704 l = self._wlockref and self._wlockref()
708 if l is not None and l.held:
705 if l is not None and l.held:
709 l.lock()
706 l.lock()
710 return l
707 return l
711
708
712 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
709 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
713 self.dirstate.invalidate, _('working directory of %s') %
710 self.dirstate.invalidate, _('working directory of %s') %
714 self.origroot)
711 self.origroot)
715 self._wlockref = weakref.ref(l)
712 self._wlockref = weakref.ref(l)
716 return l
713 return l
717
714
718 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
715 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
719 """
716 """
720 commit an individual file as part of a larger transaction
717 commit an individual file as part of a larger transaction
721 """
718 """
722
719
723 fname = fctx.path()
720 fname = fctx.path()
724 text = fctx.data()
721 text = fctx.data()
725 flog = self.file(fname)
722 flog = self.file(fname)
726 fparent1 = manifest1.get(fname, nullid)
723 fparent1 = manifest1.get(fname, nullid)
727 fparent2 = fparent2o = manifest2.get(fname, nullid)
724 fparent2 = fparent2o = manifest2.get(fname, nullid)
728
725
729 meta = {}
726 meta = {}
730 copy = fctx.renamed()
727 copy = fctx.renamed()
731 if copy and copy[0] != fname:
728 if copy and copy[0] != fname:
732 # Mark the new revision of this file as a copy of another
729 # Mark the new revision of this file as a copy of another
733 # file. This copy data will effectively act as a parent
730 # file. This copy data will effectively act as a parent
734 # of this new revision. If this is a merge, the first
731 # of this new revision. If this is a merge, the first
735 # parent will be the nullid (meaning "look up the copy data")
732 # parent will be the nullid (meaning "look up the copy data")
736 # and the second one will be the other parent. For example:
733 # and the second one will be the other parent. For example:
737 #
734 #
738 # 0 --- 1 --- 3 rev1 changes file foo
735 # 0 --- 1 --- 3 rev1 changes file foo
739 # \ / rev2 renames foo to bar and changes it
736 # \ / rev2 renames foo to bar and changes it
740 # \- 2 -/ rev3 should have bar with all changes and
737 # \- 2 -/ rev3 should have bar with all changes and
741 # should record that bar descends from
738 # should record that bar descends from
742 # bar in rev2 and foo in rev1
739 # bar in rev2 and foo in rev1
743 #
740 #
744 # this allows this merge to succeed:
741 # this allows this merge to succeed:
745 #
742 #
746 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
743 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
747 # \ / merging rev3 and rev4 should use bar@rev2
744 # \ / merging rev3 and rev4 should use bar@rev2
748 # \- 2 --- 4 as the merge base
745 # \- 2 --- 4 as the merge base
749 #
746 #
750
747
751 cfname = copy[0]
748 cfname = copy[0]
752 crev = manifest1.get(cfname)
749 crev = manifest1.get(cfname)
753 newfparent = fparent2
750 newfparent = fparent2
754
751
755 if manifest2: # branch merge
752 if manifest2: # branch merge
756 if fparent2 == nullid or crev is None: # copied on remote side
753 if fparent2 == nullid or crev is None: # copied on remote side
757 if cfname in manifest2:
754 if cfname in manifest2:
758 crev = manifest2[cfname]
755 crev = manifest2[cfname]
759 newfparent = fparent1
756 newfparent = fparent1
760
757
761 # find source in nearest ancestor if we've lost track
758 # find source in nearest ancestor if we've lost track
762 if not crev:
759 if not crev:
763 self.ui.debug(" %s: searching for copy revision for %s\n" %
760 self.ui.debug(" %s: searching for copy revision for %s\n" %
764 (fname, cfname))
761 (fname, cfname))
765 for ancestor in self['.'].ancestors():
762 for ancestor in self['.'].ancestors():
766 if cfname in ancestor:
763 if cfname in ancestor:
767 crev = ancestor[cfname].filenode()
764 crev = ancestor[cfname].filenode()
768 break
765 break
769
766
770 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
767 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
771 meta["copy"] = cfname
768 meta["copy"] = cfname
772 meta["copyrev"] = hex(crev)
769 meta["copyrev"] = hex(crev)
773 fparent1, fparent2 = nullid, newfparent
770 fparent1, fparent2 = nullid, newfparent
774 elif fparent2 != nullid:
771 elif fparent2 != nullid:
775 # is one parent an ancestor of the other?
772 # is one parent an ancestor of the other?
776 fparentancestor = flog.ancestor(fparent1, fparent2)
773 fparentancestor = flog.ancestor(fparent1, fparent2)
777 if fparentancestor == fparent1:
774 if fparentancestor == fparent1:
778 fparent1, fparent2 = fparent2, nullid
775 fparent1, fparent2 = fparent2, nullid
779 elif fparentancestor == fparent2:
776 elif fparentancestor == fparent2:
780 fparent2 = nullid
777 fparent2 = nullid
781
778
782 # is the file changed?
779 # is the file changed?
783 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
780 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
784 changelist.append(fname)
781 changelist.append(fname)
785 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
782 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
786
783
787 # are just the flags changed during merge?
784 # are just the flags changed during merge?
788 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
785 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
789 changelist.append(fname)
786 changelist.append(fname)
790
787
791 return fparent1
788 return fparent1
792
789
793 def commit(self, text="", user=None, date=None, match=None, force=False,
790 def commit(self, text="", user=None, date=None, match=None, force=False,
794 editor=False, extra={}):
791 editor=False, extra={}):
795 """Add a new revision to current repository.
792 """Add a new revision to current repository.
796
793
797 Revision information is gathered from the working directory,
794 Revision information is gathered from the working directory,
798 match can be used to filter the committed files. If editor is
795 match can be used to filter the committed files. If editor is
799 supplied, it is called to get a commit message.
796 supplied, it is called to get a commit message.
800 """
797 """
801
798
802 def fail(f, msg):
799 def fail(f, msg):
803 raise util.Abort('%s: %s' % (f, msg))
800 raise util.Abort('%s: %s' % (f, msg))
804
801
805 if not match:
802 if not match:
806 match = matchmod.always(self.root, '')
803 match = matchmod.always(self.root, '')
807
804
808 if not force:
805 if not force:
809 vdirs = []
806 vdirs = []
810 match.dir = vdirs.append
807 match.dir = vdirs.append
811 match.bad = fail
808 match.bad = fail
812
809
813 wlock = self.wlock()
810 wlock = self.wlock()
814 try:
811 try:
815 wctx = self[None]
812 wctx = self[None]
816 merge = len(wctx.parents()) > 1
813 merge = len(wctx.parents()) > 1
817
814
818 if (not force and merge and match and
815 if (not force and merge and match and
819 (match.files() or match.anypats())):
816 (match.files() or match.anypats())):
820 raise util.Abort(_('cannot partially commit a merge '
817 raise util.Abort(_('cannot partially commit a merge '
821 '(do not specify files or patterns)'))
818 '(do not specify files or patterns)'))
822
819
823 changes = self.status(match=match, clean=force)
820 changes = self.status(match=match, clean=force)
824 if force:
821 if force:
825 changes[0].extend(changes[6]) # mq may commit unchanged files
822 changes[0].extend(changes[6]) # mq may commit unchanged files
826
823
827 # check subrepos
824 # check subrepos
828 subs = []
825 subs = []
829 removedsubs = set()
826 removedsubs = set()
830 for p in wctx.parents():
827 for p in wctx.parents():
831 removedsubs.update(s for s in p.substate if match(s))
828 removedsubs.update(s for s in p.substate if match(s))
832 for s in wctx.substate:
829 for s in wctx.substate:
833 removedsubs.discard(s)
830 removedsubs.discard(s)
834 if match(s) and wctx.sub(s).dirty():
831 if match(s) and wctx.sub(s).dirty():
835 subs.append(s)
832 subs.append(s)
836 if (subs or removedsubs):
833 if (subs or removedsubs):
837 if (not match('.hgsub') and
834 if (not match('.hgsub') and
838 '.hgsub' in (wctx.modified() + wctx.added())):
835 '.hgsub' in (wctx.modified() + wctx.added())):
839 raise util.Abort(_("can't commit subrepos without .hgsub"))
836 raise util.Abort(_("can't commit subrepos without .hgsub"))
840 if '.hgsubstate' not in changes[0]:
837 if '.hgsubstate' not in changes[0]:
841 changes[0].insert(0, '.hgsubstate')
838 changes[0].insert(0, '.hgsubstate')
842
839
843 # make sure all explicit patterns are matched
840 # make sure all explicit patterns are matched
844 if not force and match.files():
841 if not force and match.files():
845 matched = set(changes[0] + changes[1] + changes[2])
842 matched = set(changes[0] + changes[1] + changes[2])
846
843
847 for f in match.files():
844 for f in match.files():
848 if f == '.' or f in matched or f in wctx.substate:
845 if f == '.' or f in matched or f in wctx.substate:
849 continue
846 continue
850 if f in changes[3]: # missing
847 if f in changes[3]: # missing
851 fail(f, _('file not found!'))
848 fail(f, _('file not found!'))
852 if f in vdirs: # visited directory
849 if f in vdirs: # visited directory
853 d = f + '/'
850 d = f + '/'
854 for mf in matched:
851 for mf in matched:
855 if mf.startswith(d):
852 if mf.startswith(d):
856 break
853 break
857 else:
854 else:
858 fail(f, _("no match under directory!"))
855 fail(f, _("no match under directory!"))
859 elif f not in self.dirstate:
856 elif f not in self.dirstate:
860 fail(f, _("file not tracked!"))
857 fail(f, _("file not tracked!"))
861
858
862 if (not force and not extra.get("close") and not merge
859 if (not force and not extra.get("close") and not merge
863 and not (changes[0] or changes[1] or changes[2])
860 and not (changes[0] or changes[1] or changes[2])
864 and wctx.branch() == wctx.p1().branch()):
861 and wctx.branch() == wctx.p1().branch()):
865 return None
862 return None
866
863
867 ms = mergemod.mergestate(self)
864 ms = mergemod.mergestate(self)
868 for f in changes[0]:
865 for f in changes[0]:
869 if f in ms and ms[f] == 'u':
866 if f in ms and ms[f] == 'u':
870 raise util.Abort(_("unresolved merge conflicts "
867 raise util.Abort(_("unresolved merge conflicts "
871 "(see hg resolve)"))
868 "(see hg resolve)"))
872
869
873 cctx = context.workingctx(self, text, user, date, extra, changes)
870 cctx = context.workingctx(self, text, user, date, extra, changes)
874 if editor:
871 if editor:
875 cctx._text = editor(self, cctx, subs)
872 cctx._text = editor(self, cctx, subs)
876 edited = (text != cctx._text)
873 edited = (text != cctx._text)
877
874
878 # commit subs
875 # commit subs
879 if subs or removedsubs:
876 if subs or removedsubs:
880 state = wctx.substate.copy()
877 state = wctx.substate.copy()
881 for s in subs:
878 for s in subs:
882 sub = wctx.sub(s)
879 sub = wctx.sub(s)
883 self.ui.status(_('committing subrepository %s\n') %
880 self.ui.status(_('committing subrepository %s\n') %
884 subrepo.relpath(sub))
881 subrepo.relpath(sub))
885 sr = sub.commit(cctx._text, user, date)
882 sr = sub.commit(cctx._text, user, date)
886 state[s] = (state[s][0], sr)
883 state[s] = (state[s][0], sr)
887 subrepo.writestate(self, state)
884 subrepo.writestate(self, state)
888
885
889 # Save commit message in case this transaction gets rolled back
886 # Save commit message in case this transaction gets rolled back
890 # (e.g. by a pretxncommit hook). Leave the content alone on
887 # (e.g. by a pretxncommit hook). Leave the content alone on
891 # the assumption that the user will use the same editor again.
888 # the assumption that the user will use the same editor again.
892 msgfile = self.opener('last-message.txt', 'wb')
889 msgfile = self.opener('last-message.txt', 'wb')
893 msgfile.write(cctx._text)
890 msgfile.write(cctx._text)
894 msgfile.close()
891 msgfile.close()
895
892
896 p1, p2 = self.dirstate.parents()
893 p1, p2 = self.dirstate.parents()
897 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
894 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
898 try:
895 try:
899 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
896 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
900 ret = self.commitctx(cctx, True)
897 ret = self.commitctx(cctx, True)
901 except:
898 except:
902 if edited:
899 if edited:
903 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
900 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
904 self.ui.write(
901 self.ui.write(
905 _('note: commit message saved in %s\n') % msgfn)
902 _('note: commit message saved in %s\n') % msgfn)
906 raise
903 raise
907
904
908 # update dirstate and mergestate
905 # update dirstate and mergestate
909 for f in changes[0] + changes[1]:
906 for f in changes[0] + changes[1]:
910 self.dirstate.normal(f)
907 self.dirstate.normal(f)
911 for f in changes[2]:
908 for f in changes[2]:
912 self.dirstate.forget(f)
909 self.dirstate.forget(f)
913 self.dirstate.setparents(ret)
910 self.dirstate.setparents(ret)
914 ms.reset()
911 ms.reset()
915 finally:
912 finally:
916 wlock.release()
913 wlock.release()
917
914
918 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
915 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
919 return ret
916 return ret
920
917
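    # Editor's note: an illustrative sketch, not part of this changeset.  A
    # typical caller drives commit() like this; the repository path, message
    # and user below are hypothetical:
    #
    #   from mercurial import ui as uimod, hg
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='fix the widget', user='alice <a@example.com>')
    #   # commit() returns the new changeset node, or None if nothing changed
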
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

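    # Editor's note: a hedged sketch, not part of this changeset.  commitctx()
    # also accepts an in-memory context, which is how tools create changesets
    # without a working directory; the file name and contents are
    # hypothetical:
    #
    #   from mercurial import context
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'new contents\n')
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'example message',
    #                         ['a.txt'], getfilectx, user='alice')
    #   node = repo.commitctx(mctx)
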
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

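    # Editor's note: a small usage sketch, not part of this changeset; the
    # glob pattern is hypothetical:
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):  # node=None walks the working dir
    #       print f
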
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

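    # Editor's note: a usage sketch, not part of this changeset.  status()
    # returns a 7-tuple of sorted file lists:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #   # with the default arguments, the unknown/ignored/clean lists
    #   # come back empty
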
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

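    # Editor's note: a usage sketch, not part of this changeset; 'default'
    # is the usual branch name:
    #
    #   for h in repo.branchheads('default', closed=True):
    #       print short(h)   # newest to oldest, including closed heads
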
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

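    # Editor's note, not part of this changeset: between() samples the first
    # ancestry line at exponentially growing distances -- the step counter i
    # is compared against f, which doubles each time a node is kept, so for
    # each (top, bottom) pair it records the ancestors 1, 2, 4, 8, ... steps
    # below top.  The discovery protocol uses this to narrow down which
    # ancestors the two repositories have in common.
    #
    #   # e.g. with 10 linear ancestors between top and bottom, the returned
    #   # list holds the nodes at distances 1, 2, 4 and 8 from top
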
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

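    # Editor's note: a usage sketch, not part of this changeset.  The URL is
    # hypothetical, and mercurial.hg is assumed to be importable at the call
    # site (it is not imported by this module):
    #
    #   other = hg.repository(repo.ui, 'http://hg.example.com/repo')
    #   repo.pull(other)             # pull everything
    #   repo.pull(other, heads=[h])  # pull only ancestors of h (requires the
    #                                # changegroupsubset capability)
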
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

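    # Editor's note: a sketch of interpreting push()'s return value, not
    # part of this changeset:
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       pass  # HTTP error or nothing to push
    #   elif ret == 1:
    #       pass  # pushed, head count unchanged, or push was refused
    #   else:
    #       pass  # head count changed; see addchangegroup() below
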
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt,
                                 unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname,
                            unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

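    # Editor's note: a sketch of the extranodes mapping described in the
    # docstring above, not part of this changeset; the node values stand in
    # for real 20-byte binary nodes:
    #
    #   extranodes = {
    #       1: [(manifestnode, linknode)],       # 1 keys the manifest
    #       'foo.txt': [(filenode, linknode)],   # filenames key filelogs
    #   }
    #   cg = repo.changegroupsubset(bases, heads, 'pull',
    #                               extranodes=extranodes)
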
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt,
                                 unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname,
                            unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

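    # Editor's note, not part of this changeset: both changegroup methods
    # return a util.chunkbuffer, so callers consume the stream with read():
    #
    #   cg = repo.changegroup([nullid], 'bundle')   # bundle everything
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       # write chunk to a bundle file, a socket, ...
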
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


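    # Editor's note: a sketch of the return-value convention documented
    # above, not part of this changeset:
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret == 0:      # nothing changed or no source
    #       pass
    #   elif ret > 1:     # ret - 1 new heads were added
    #       pass
    #   elif ret < 0:     # -ret - 1 heads were removed
    #       pass
    #   else:             # ret == 1: head count unchanged
    #       pass
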
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

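The wire format consumed by stream_in above is: one status line (an integer), one 'file_count total_bytes' line, then per file a name, a NUL byte, the size and a newline, followed by exactly that many raw bytes. A server-side generator illustrating the same layout (an illustrative sketch under those assumptions, not the real stream_out implementation):

def stream_out_sketch(entries):
    # entries: list of (name, data) pairs, data being the raw file bytes
    yield '0\n'  # 0 = OK; 1 = forbidden, 2 = lock failed (see stream_in)
    total = sum(len(data) for name, data in entries)
    yield '%d %d\n' % (len(entries), total)
    for name, data in entries:
        # NUL separates name from size; hence no '\n' or '\r' in names
        yield '%s\0%d\n' % (name, len(data))
        yield data
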
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

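Hypothetical call sites for clone(), matching the two branches above (names are illustrative, not from this changeset):

# repo.clone(remote, stream=True)     # streaming clone when the server allows it
# repo.clone(remote, heads=[h1, h2])  # requesting heads forces the pull() path
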
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

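Both methods delegate to the pushkey module. A hedged usage sketch; the 'bookmarks' namespace name is an assumption about what may be registered, not something this file defines:

# repo.listkeys('bookmarks')                   # e.g. {'feature-x': hexnode, ...}
# repo.pushkey('bookmarks', 'feature-x', '', hexnode)  # empty old value = create
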
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

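aftertrans returns a plain closure rather than a repository method, so the transaction holds no reference back to the repo object. An assumed call site, sketched from the transaction API of this era (not shown in this changeset):

# renames = [(self.sjoin('journal'), self.sjoin('undo'))]
# tr = transaction.transaction(self.ui.warn, self.sopener,
#                              self.sjoin('journal'), aftertrans(renames))
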
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,44 +1,37 @@
 # repo.py - repository base classes for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
 import error
 
 class repository(object):
     def capable(self, name):
         '''tell whether repo supports named capability.
         return False if not supported.
         if boolean capability, return True.
         if string capability, return string.'''
         if name in self.capabilities:
             return True
         name_eq = name + '='
         for cap in self.capabilities:
             if cap.startswith(name_eq):
                 return cap[len(name_eq):]
         return False
 
     def requirecap(self, name, purpose):
         '''raise an exception if the given capability is not present'''
         if not self.capable(name):
             raise error.CapabilityError(
                 _('cannot %s; remote repository does not '
                   'support the %r capability') % (purpose, name))
 
     def local(self):
         return False
 
     def cancopy(self):
         return self.local()
-
-    def rjoin(self, path):
-        url = self.url()
-        if url.endswith('/'):
-            return url + path
-        else:
-            return url + '/' + path
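
A worked example of the boolean-versus-string lookup in capable() above; the capability strings are illustrative, not taken from this changeset:

# caps = set(['lookup', 'unbundle=HG10GZ,HG10BZ,HG10UN'])
# repo.capable('lookup')    # -> True, plain boolean capability
# repo.capable('unbundle')  # -> 'HG10GZ,HG10BZ,HG10UN', the value after 'name='
# repo.capable('stream')    # -> False, not advertised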