progress: add progress calls to changeset discovery
Matt Mackall
r10435:956498af default
@@ -1,2216 +1,2219 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

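    # Editor's note: the sketch below is illustrative and not part of this
    # changeset. It shows how the tag() API above is typically driven; the
    # tag name, message, and user string are placeholder assumptions.
    def _example_tag_usage(self):
        # local=False commits a changeset that updates .hgtags, while
        # local=True would only write .hg/localtags without committing
        self.tag('v1.0', self.changelog.tip(), 'Added tag v1.0 for tip',
                 False, 'editor <editor@example.com>', None)
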
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev + 1, tiprev + 1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

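    # Editor's note: an illustrative sketch, not part of this changeset,
    # contrasting the two branch queries above. branchmap() keeps every
    # head per branch, while branchtags() reduces each branch to a single
    # preferred (open, tipmost) head.
    def _example_branch_queries(self):
        for branch, heads in self.branchmap().iteritems():
            # heads is a list of binary nodes: all heads of this branch
            self.ui.note("%s has %d head(s)\n" % (branch, len(heads)))
        # branchtags() maps each branch name to exactly one node
        tip = self.branchtags().get('default')
        if tip is not None:
            self.ui.note("default branch tip: %s\n" % hex(tip))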

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

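    # Editor's note: illustrative only, not part of this changeset. lookup()
    # resolves many key forms to a binary node; a few representative calls,
    # assuming a tag 'v1.0' and a branch 'stable' exist in the repository:
    def _example_lookup_usage(self):
        n1 = self.lookup(0)        # integer revision number
        n2 = self.lookup('.')      # first parent of the working directory
        n3 = self.lookup('tip')    # tip changeset
        n4 = self.lookup('v1.0')   # tag name (assumed to exist)
        n5 = self.lookup('stable') # branch name (assumed to exist)
        return [short(n) for n in (n1, n2, n3, n4, n5)]
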
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

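    # Editor's note: an illustrative sketch, not in the original changeset,
    # of registering a data filter with adddatafilter() above. The filter
    # name and its behaviour here are invented for the example; real filters
    # are matched against [encode]/[decode] config commands by prefix.
    def _example_register_filter(self):
        def lowercase(data, params, **kwargs):
            # receives file data plus keyword args (ui, repo, filename)
            return data.lower()
        self.adddatafilter('lowercase:', lowercase)
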
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

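    # Editor's note: illustrative sketch, not part of this changeset. The
    # usual shape of a write operation built on transaction() above: take
    # the store lock first, close the transaction on success, and let an
    # abandoned journal be rolled back by hg recover otherwise.
    def _example_transaction_usage(self):
        lock = self.lock()
        try:
            tr = self.transaction()
            try:
                # ... append revlog data through tr here ...
                tr.close()
            finally:
                del tr
        finally:
            lock.release()
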
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

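    # Editor's note: illustrative sketch, not part of this changeset. When
    # both locks are needed (as in rollback() above), the working-directory
    # lock is taken before the store lock, and both are released through
    # lock.release() from the surrounding module.
    def _example_lock_ordering(self):
        wlock = lock = None
        try:
            wlock = self.wlock() # protects .hg/dirstate and friends
            lock = self.lock()   # protects .hg/store
            # ... modify working directory state and store here ...
        finally:
            release(lock, wlock) # release in reverse acquisition order
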
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            try:
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

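    # Editor's note: illustrative sketch, not part of this changeset. A
    # minimal partial commit through the API above; the file name and
    # message are placeholder assumptions.
    def _example_commit_usage(self):
        # commit only the named file; returns the new changeset node,
        # or None when there is nothing to commit
        m = match_.exact(self.root, '', ['README'])
        node = self.commit('update README', 'editor <editor@example.com>',
                           None, match=m)
        return node and hex(node)
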
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

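    # The three hooks fired by commitctx above run in a fixed order; a
    # hypothetical hgrc entry set illustrating that order (a sketch, not
    # part of this module):
    #   [hooks]
    #   precommit.check   = ...  # before any file is checked in; may abort
    #   pretxncommit.lint = ...  # sees the new node while the transaction
    #                            # is still open (via the 'pending' callback)
    #   commit.notify     = ...  # after the transaction has been closed
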
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

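    # Usage sketch for walk() (illustrative only): list every file in the
    # working directory with an always-true matcher from the match module
    # (imported above as match_):
    #   m = match_.always(repo.root, repo.getcwd())
    #   for f in repo.walk(m):
    #       print f
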
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

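    # The tuple returned by status() unpacks in this fixed order (a usage
    # sketch):
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    # The last three lists stay empty unless the corresponding flag is set.
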
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

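    # Decorate-sort-undecorate above: negating the revision number makes a
    # plain ascending sort yield newest-first, e.g. heads at revs 3, 7 and 5
    # become (-7, ...), (-5, ...), (-3, ...) and come back as [7, 5, 3].
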
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

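    # E.g. (sketch): repo.branchheads('default') returns the open heads of
    # the 'default' branch, newest first; with closed=True, heads whose
    # changeset carries 'close' in its extra dict (the changelog.read(h)[5]
    # test above) are included as well.
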
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

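    # Each tuple appended above is (head, root, root's p1, root's p2): the
    # walk follows first parents from t until it reaches a merge or the null
    # revision, so for a purely linear repository with root r,
    # branches([tip]) returns [(tip, r, nullid, nullid)] -- one segment
    # covering the whole history. This is the 'branch' notion used by the
    # discovery code below.
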
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

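    # Sampling sketch: because f doubles each time a node is taken, the
    # samples are exponentially spaced. For a linear run c0 (bottom) .. c9
    # (top), between([(c9, c0)]) yields [[c8, c7, c5, c1]] -- the nodes 1, 2,
    # 4 and 8 steps below the top, which is the spacing the binary search in
    # findcommonincoming below relies on.
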
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but where no children exist in both self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but where no children exist in both self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress('searching', reqcnt, unit='queries')
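                # (ui.progress takes a topic, a position and an optional
                # unit; passing None as the position -- as at the end of
                # this method -- marks the 'searching' topic finished and
                # clears the indicator.)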
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress('searching', reqcnt, unit='queries')
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress('searching', None, unit='queries')
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

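    # Dispatch sketch: the choice above is purely capability-driven. Peers
    # that advertise 'unbundle' (http servers, newer ssh servers) get the
    # race-safe unbundle path; everything else falls back to locking the
    # remote repository and calling addchangegroup directly.
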
    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
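        # Caller sketch (see push_addchangegroup and push_unbundle below):
        # when there is nothing to push, prepush returns (None, exitcode)
        # instead, so callers test ret[0] before unpacking the pair.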
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb, branchname=None):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if branchname is not None:
                    msg = _("abort: push creates new remote heads"
                            " on branch '%s'!\n") % branchname
                else:
                    msg = _("abort: push creates new remote heads!\n")
                self.ui.warn(msg)
                if len(lheads) > len(rheads):
                    self.ui.status(_("(did you forget to merge?"
                                     " use push -f to force)\n"))
                else:
                    self.ui.status(_("(you should pull and merge or"
                                     " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    remotebrheads = remote.branchmap()

                    if not revs:
                        localbrheads = self.branchmap()
                    else:
                        localbrheads = {}
                        for n in heads:
                            branch = self[n].branch()
                            localbrheads.setdefault(branch, []).append(n)

                    newbranches = list(set(localbrheads) - set(remotebrheads))
                    if newbranches: # new branch requires --force
                        branchnames = ', '.join("%s" % b for b in newbranches)
                        self.ui.warn(_("abort: push creates "
                                       "new remote branches: %s!\n")
                                     % branchnames)
                        # propose 'push -b .' in the msg too?
                        self.ui.status(_("(use 'hg push -f' to force)\n"))
                        return None, 0
                    for branch, lheads in localbrheads.iteritems():
                        if branch in remotebrheads:
                            rheads = remotebrheads[branch]
                            if not checkbranch(lheads, rheads, update, branch):
                                return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
1657
1660
1658 # Set up some initial variables
1661 # Set up some initial variables
1659 # Make it easy to refer to self.changelog
1662 # Make it easy to refer to self.changelog
1660 cl = self.changelog
1663 cl = self.changelog
1661 # msng is short for missing - compute the list of changesets in this
1664 # msng is short for missing - compute the list of changesets in this
1662 # changegroup.
1665 # changegroup.
1663 if not bases:
1666 if not bases:
1664 bases = [nullid]
1667 bases = [nullid]
1665 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1668 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1666
1669
1667 if extranodes is None:
1670 if extranodes is None:
1668 # can we go through the fast path ?
1671 # can we go through the fast path ?
1669 heads.sort()
1672 heads.sort()
1670 allheads = self.heads()
1673 allheads = self.heads()
1671 allheads.sort()
1674 allheads.sort()
1672 if heads == allheads:
1675 if heads == allheads:
1673 return self._changegroup(msng_cl_lst, source)
1676 return self._changegroup(msng_cl_lst, source)
1674
1677
1675 # slow path
1678 # slow path
1676 self.hook('preoutgoing', throw=True, source=source)
1679 self.hook('preoutgoing', throw=True, source=source)
1677
1680
1678 self.changegroupinfo(msng_cl_lst, source)
1681 self.changegroupinfo(msng_cl_lst, source)
1679 # Some bases may turn out to be superfluous, and some heads may be
1682 # Some bases may turn out to be superfluous, and some heads may be
1680 # too. nodesbetween will return the minimal set of bases and heads
1683 # too. nodesbetween will return the minimal set of bases and heads
1681 # necessary to re-create the changegroup.
1684 # necessary to re-create the changegroup.
1682
1685
1683 # Known heads are the list of heads that it is assumed the recipient
1686 # Known heads are the list of heads that it is assumed the recipient
1684 # of this changegroup will know about.
1687 # of this changegroup will know about.
1685 knownheads = set()
1688 knownheads = set()
1686 # We assume that all parents of bases are known heads.
1689 # We assume that all parents of bases are known heads.
1687 for n in bases:
1690 for n in bases:
1688 knownheads.update(cl.parents(n))
1691 knownheads.update(cl.parents(n))
1689 knownheads.discard(nullid)
1692 knownheads.discard(nullid)
1690 knownheads = list(knownheads)
1693 knownheads = list(knownheads)
1691 if knownheads:
1694 if knownheads:
1692 # Now that we know what heads are known, we can compute which
1695 # Now that we know what heads are known, we can compute which
1693 # changesets are known. The recipient must know about all
1696 # changesets are known. The recipient must know about all
1694 # changesets required to reach the known heads from the null
1697 # changesets required to reach the known heads from the null
1695 # changeset.
1698 # changeset.
1696 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1699 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1697 junk = None
1700 junk = None
1698 # Transform the list into a set.
1701 # Transform the list into a set.
1699 has_cl_set = set(has_cl_set)
1702 has_cl_set = set(has_cl_set)
1700 else:
1703 else:
1701 # If there were no known heads, the recipient cannot be assumed to
1704 # If there were no known heads, the recipient cannot be assumed to
1702 # know about any changesets.
1705 # know about any changesets.
1703 has_cl_set = set()
1706 has_cl_set = set()
1704
1707
1705 # Make it easy to refer to self.manifest
1708 # Make it easy to refer to self.manifest
1706 mnfst = self.manifest
1709 mnfst = self.manifest
1707 # We don't know which manifests are missing yet
1710 # We don't know which manifests are missing yet
1708 msng_mnfst_set = {}
1711 msng_mnfst_set = {}
1709 # Nor do we know which filenodes are missing.
1712 # Nor do we know which filenodes are missing.
1710 msng_filenode_set = {}
1713 msng_filenode_set = {}
1711
1714
1712 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1715 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1713 junk = None
1716 junk = None
1714
1717
1715 # A changeset always belongs to itself, so the changenode lookup
1718 # A changeset always belongs to itself, so the changenode lookup
1716 # function for a changenode is identity.
1719 # function for a changenode is identity.
1717 def identity(x):
1720 def identity(x):
1718 return x
1721 return x
1719
1722
1720 # If we determine that a particular file or manifest node must be a
1723 # If we determine that a particular file or manifest node must be a
1721 # node that the recipient of the changegroup will already have, we can
1724 # node that the recipient of the changegroup will already have, we can
1722 # also assume the recipient will have all the parents. This function
1725 # also assume the recipient will have all the parents. This function
1723 # prunes them from the set of missing nodes.
1726 # prunes them from the set of missing nodes.
1724 def prune_parents(revlog, hasset, msngset):
1727 def prune_parents(revlog, hasset, msngset):
1725 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1728 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1726 msngset.pop(revlog.node(r), None)
1729 msngset.pop(revlog.node(r), None)
1727
1730
1728 # Use the information collected in collect_manifests_and_files to say
1731 # Use the information collected in collect_manifests_and_files to say
1729 # which changenode any manifestnode belongs to.
1732 # which changenode any manifestnode belongs to.
1730 def lookup_manifest_link(mnfstnode):
1733 def lookup_manifest_link(mnfstnode):
1731 return msng_mnfst_set[mnfstnode]
1734 return msng_mnfst_set[mnfstnode]
1732
1735
1733 # A function generating function that sets up the initial environment
1736 # A function generating function that sets up the initial environment
1734 # the inner function.
1737 # the inner function.
1735 def filenode_collector(changedfiles):
1738 def filenode_collector(changedfiles):
1736 # This gathers information from each manifestnode included in the
1739 # This gathers information from each manifestnode included in the
1737 # changegroup about which filenodes the manifest node references
1740 # changegroup about which filenodes the manifest node references
1738 # so we can include those in the changegroup too.
1741 # so we can include those in the changegroup too.
1739 #
1742 #
1740 # It also remembers which changenode each filenode belongs to. It
1743 # It also remembers which changenode each filenode belongs to. It
1741 # does this by assuming the a filenode belongs to the changenode
1744 # does this by assuming the a filenode belongs to the changenode
1742 # the first manifest that references it belongs to.
1745 # the first manifest that references it belongs to.
1743 def collect_msng_filenodes(mnfstnode):
1746 def collect_msng_filenodes(mnfstnode):
1744 r = mnfst.rev(mnfstnode)
1747 r = mnfst.rev(mnfstnode)
1745 if r - 1 in mnfst.parentrevs(r):
1748 if r - 1 in mnfst.parentrevs(r):
1746 # If the previous rev is one of the parents,
1749 # If the previous rev is one of the parents,
1747 # we only need to see a diff.
1750 # we only need to see a diff.
1748 deltamf = mnfst.readdelta(mnfstnode)
1751 deltamf = mnfst.readdelta(mnfstnode)
1749 # For each line in the delta
1752 # For each line in the delta
1750 for f, fnode in deltamf.iteritems():
1753 for f, fnode in deltamf.iteritems():
1751 f = changedfiles.get(f, None)
1754 f = changedfiles.get(f, None)
1752 # And if the file is in the list of files we care
1755 # And if the file is in the list of files we care
1753 # about.
1756 # about.
1754 if f is not None:
1757 if f is not None:
1755 # Get the changenode this manifest belongs to
1758 # Get the changenode this manifest belongs to
1756 clnode = msng_mnfst_set[mnfstnode]
1759 clnode = msng_mnfst_set[mnfstnode]
1757 # Create the set of filenodes for the file if
1760 # Create the set of filenodes for the file if
1758 # there isn't one already.
1761 # there isn't one already.
1759 ndset = msng_filenode_set.setdefault(f, {})
1762 ndset = msng_filenode_set.setdefault(f, {})
1760 # And set the filenode's changelog node to the
1763 # And set the filenode's changelog node to the
1761 # manifest's if it hasn't been set already.
1764 # manifest's if it hasn't been set already.
1762 ndset.setdefault(fnode, clnode)
1765 ndset.setdefault(fnode, clnode)
1763 else:
1766 else:
1764 # Otherwise we need a full manifest.
1767 # Otherwise we need a full manifest.
1765 m = mnfst.read(mnfstnode)
1768 m = mnfst.read(mnfstnode)
1766 # For every file in we care about.
1769 # For every file in we care about.
1767 for f in changedfiles:
1770 for f in changedfiles:
1768 fnode = m.get(f, None)
1771 fnode = m.get(f, None)
1769 # If it's in the manifest
1772 # If it's in the manifest
1770 if fnode is not None:
1773 if fnode is not None:
1771 # See comments above.
1774 # See comments above.
1772 clnode = msng_mnfst_set[mnfstnode]
1775 clnode = msng_mnfst_set[mnfstnode]
1773 ndset = msng_filenode_set.setdefault(f, {})
1776 ndset = msng_filenode_set.setdefault(f, {})
1774 ndset.setdefault(fnode, clnode)
1777 ndset.setdefault(fnode, clnode)
1775 return collect_msng_filenodes
1778 return collect_msng_filenodes
1776
1779
1777 # We have a list of filenodes we think we need for a file, lets remove
1780 # We have a list of filenodes we think we need for a file, lets remove
1778 # all those we know the recipient must have.
1781 # all those we know the recipient must have.
1779 def prune_filenodes(f, filerevlog):
1782 def prune_filenodes(f, filerevlog):
1780 msngset = msng_filenode_set[f]
1783 msngset = msng_filenode_set[f]
1781 hasset = set()
1784 hasset = set()
1782 # If a 'missing' filenode thinks it belongs to a changenode we
1785 # If a 'missing' filenode thinks it belongs to a changenode we
1783 # assume the recipient must have, then the recipient must have
1786 # assume the recipient must have, then the recipient must have
1784 # that filenode.
1787 # that filenode.
1785 for n in msngset:
1788 for n in msngset:
1786 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1789 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1787 if clnode in has_cl_set:
1790 if clnode in has_cl_set:
1788 hasset.add(n)
1791 hasset.add(n)
1789 prune_parents(filerevlog, hasset, msngset)
1792 prune_parents(filerevlog, hasset, msngset)

        # A function generator that sets up the context for the inner
        # function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
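
        # A hedged aside (illustrative only, not part of the module): the
        # factory above freezes per-file state in a closure so the revlog
        # group generator gets a plain one-argument lookup callback. The
        # same pattern in isolation, with hypothetical names:
        #
        #     def make_lookup(table):
        #         def lookup(key):
        #             return table[key]
        #         return lookup
        #
        #     lookup = make_lookup({'a': 1})
        #     assert lookup('a') == 1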

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress('bundle changes', cnt, unit='chunks')
                cnt += 1
            self.ui.progress('bundle changes', None, unit='chunks')

            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress('bundle manifests', cnt, unit='chunks')
                cnt += 1
            self.ui.progress('bundle manifests', None, unit='chunks')

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        self.ui.progress(
                            'bundle files', cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress('bundle files', None, unit='chunks')

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
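
    # A hedged usage sketch (not part of this module): callers typically
    # drain the returned chunkbuffer through its read() method and write
    # the bytes to a bundle file or a wire protocol stream. 'repo',
    # 'bases', 'heads' and 'out' below are hypothetical stand-ins.
    #
    #     cg = repo.changegroupsubset(bases, heads, 'bundle')
    #     while 1:
    #         chunk = cg.read(4096)
    #         if not chunk:
    #             break
    #         out.write(chunk)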

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress('bundle changes', cnt, unit='chunks')
                cnt += 1
                yield chnk
            self.ui.progress('bundle changes', None, unit='chunks')

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                self.ui.progress('bundle manifests', cnt, unit='chunks')
                cnt += 1
                yield chnk
            self.ui.progress('bundle manifests', None, unit='chunks')

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            'bundle files', cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
            self.ui.progress('bundle files', None, unit='chunks')

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = 'changesets'
                count = 1
                ui = self.ui
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit='chunks')
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            self.ui.progress('changesets', None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = 'manifests'
            pr.count = 1
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress('manifests', None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source, progress=pr)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress('files', None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
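
    # A hedged sketch (illustrative only): decoding the return value
    # documented in the docstring above. 'ret' is a hypothetical result
    # of a call to addchangegroup().
    #
    #     if ret == 0:
    #         pass                   # nothing changed or no source
    #     elif ret == 1:
    #         pass                   # changes added, head count unchanged
    #     elif ret > 1:
    #         added = ret - 1        # that many new heads appeared
    #     else:
    #         removed = -ret - 1     # that many heads went away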

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
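
    # A hedged summary (not in the original): as the code above shows, the
    # stream_out wire format is line-oriented framing around raw file data:
    #
    #     <status code>\n          0 = ok, 1 = forbidden, 2 = lock failed
    #     <total files> <total bytes>\n
    #     then, per file:
    #     <store path>\0<size>\n followed by exactly <size> bytes of data
    #
    # A toy reader for one file entry, with 'fp' a hypothetical file-like
    # object positioned at a per-file header:
    #
    #     l = fp.readline()
    #     name, size = l.split('\0', 1)
    #     data = fp.read(int(size))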

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
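
# A hedged usage sketch (illustrative only; the real call sites live
# elsewhere in this module): aftertrans() returns a plain function that a
# transaction can hold as an "after" callback without keeping a reference
# cycle back to the repository. The file names here are hypothetical.
#
#     after = aftertrans([('journal', 'undo')])
#     ...  # close the transaction
#     after()  # renames journal -> undo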

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True