localrepo: use set.update for bulk updates
Martin Geisler
r8481:a9dab5a0 default
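
The hunks this changeset actually touches fall outside the excerpt below; lines 1-1052 of localrepo.py are unchanged context. As an illustrative sketch only (not the real diff, and with hypothetical variable names), the pattern named in the commit message replaces element-by-element additions with a single bulk call to set.update:

    # before: building up a set one element at a time
    heads = set()
    for n in nodes:
        heads.add(n)

    # after: one bulk update, as the commit message describes
    heads = set()
    heads.update(nodes)
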
@@ -1,2094 +1,2092 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset'))
    supported = set('revlogv1 store fncache'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if baseui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if baseui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.baseui = baseui
        self.ui = baseui.copy()
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        last = {}
        ret = []
        for node in reversed(self.heads()):
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def _branchheads(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self._branchheads().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, files=None, text="", user=None, date=None, match=None,
               force=False, editor=False, extra={}):
        wlock = lock = None
        if extra.get("close"):
            force = True
        if files:
            files = list(set(files))

        ret = None
        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()

            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)

            if (not (changes[0] or changes[1] or changes[2])
                and not force and p2 == nullid and
                self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            ret = self.commitctx(wctx, editor, True)
            ms.reset()

            # update dirstate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)

            return ret

        finally:
            if ret == None:
                self.dirstate.invalidate() # didn't successfully commit
            wlock.release()

    def commitctx(self, ctx, editor=None, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        If editor is supplied, it is called to get a commit message.
        If working is set, the working directory is affected.
        """

        tr = lock = None
        remove = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in sorted(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, removed1))

            text = ctx.description()
            if editor:
                text = editor(self, ctx, added, updated, removed)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            text = '\n'.join(lines)

            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, text, trp,
                                   p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

1036 def add(self, list):
1036 def add(self, list):
1037 wlock = self.wlock()
1037 wlock = self.wlock()
1038 try:
1038 try:
1039 rejected = []
1039 rejected = []
1040 for f in list:
1040 for f in list:
1041 p = self.wjoin(f)
1041 p = self.wjoin(f)
1042 try:
1042 try:
1043 st = os.lstat(p)
1043 st = os.lstat(p)
1044 except:
1044 except:
1045 self.ui.warn(_("%s does not exist!\n") % f)
1045 self.ui.warn(_("%s does not exist!\n") % f)
1046 rejected.append(f)
1046 rejected.append(f)
1047 continue
1047 continue
1048 if st.st_size > 10000000:
1048 if st.st_size > 10000000:
1049 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 self.ui.warn(_("%s: files over 10MB may cause memory and"
1050 " performance problems\n"
1050 " performance problems\n"
1051 "(use 'hg revert %s' to unadd the file)\n")
1051 "(use 'hg revert %s' to unadd the file)\n")
1052 % (f, f))
1052 % (f, f))
1053 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1054 self.ui.warn(_("%s not added: only files and symlinks "
1054 self.ui.warn(_("%s not added: only files and symlinks "
1055 "supported currently\n") % f)
1055 "supported currently\n") % f)
1056 rejected.append(p)
1056 rejected.append(p)
1057 elif self.dirstate[f] in 'amn':
1057 elif self.dirstate[f] in 'amn':
1058 self.ui.warn(_("%s already tracked!\n") % f)
1058 self.ui.warn(_("%s already tracked!\n") % f)
1059 elif self.dirstate[f] == 'r':
1059 elif self.dirstate[f] == 'r':
1060 self.dirstate.normallookup(f)
1060 self.dirstate.normallookup(f)
1061 else:
1061 else:
1062 self.dirstate.add(f)
1062 self.dirstate.add(f)
1063 return rejected
1063 return rejected
1064 finally:
1064 finally:
1065 wlock.release()
1065 wlock.release()
1066
1066
1067 def forget(self, list):
1067 def forget(self, list):
1068 wlock = self.wlock()
1068 wlock = self.wlock()
1069 try:
1069 try:
1070 for f in list:
1070 for f in list:
1071 if self.dirstate[f] != 'a':
1071 if self.dirstate[f] != 'a':
1072 self.ui.warn(_("%s not added!\n") % f)
1072 self.ui.warn(_("%s not added!\n") % f)
1073 else:
1073 else:
1074 self.dirstate.forget(f)
1074 self.dirstate.forget(f)
1075 finally:
1075 finally:
1076 wlock.release()
1076 wlock.release()
1077
1077
1078 def remove(self, list, unlink=False):
1078 def remove(self, list, unlink=False):
1079 wlock = None
1079 wlock = None
1080 try:
1080 try:
1081 if unlink:
1081 if unlink:
1082 for f in list:
1082 for f in list:
1083 try:
1083 try:
1084 util.unlink(self.wjoin(f))
1084 util.unlink(self.wjoin(f))
1085 except OSError, inst:
1085 except OSError, inst:
1086 if inst.errno != errno.ENOENT:
1086 if inst.errno != errno.ENOENT:
1087 raise
1087 raise
1088 wlock = self.wlock()
1088 wlock = self.wlock()
1089 for f in list:
1089 for f in list:
1090 if unlink and os.path.exists(self.wjoin(f)):
1090 if unlink and os.path.exists(self.wjoin(f)):
1091 self.ui.warn(_("%s still exists!\n") % f)
1091 self.ui.warn(_("%s still exists!\n") % f)
1092 elif self.dirstate[f] == 'a':
1092 elif self.dirstate[f] == 'a':
1093 self.dirstate.forget(f)
1093 self.dirstate.forget(f)
1094 elif f not in self.dirstate:
1094 elif f not in self.dirstate:
1095 self.ui.warn(_("%s not tracked!\n") % f)
1095 self.ui.warn(_("%s not tracked!\n") % f)
1096 else:
1096 else:
1097 self.dirstate.remove(f)
1097 self.dirstate.remove(f)
1098 finally:
1098 finally:
1099 release(wlock)
1099 release(wlock)
1100
1100
1101 def undelete(self, list):
1101 def undelete(self, list):
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 for p in self.dirstate.parents() if p != nullid]
1103 for p in self.dirstate.parents() if p != nullid]
1104 wlock = self.wlock()
1104 wlock = self.wlock()
1105 try:
1105 try:
1106 for f in list:
1106 for f in list:
1107 if self.dirstate[f] != 'r':
1107 if self.dirstate[f] != 'r':
1108 self.ui.warn(_("%s not removed!\n") % f)
1108 self.ui.warn(_("%s not removed!\n") % f)
1109 else:
1109 else:
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1110 m = f in manifests[0] and manifests[0] or manifests[1]
1111 t = self.file(f).read(m[f])
1111 t = self.file(f).read(m[f])
1112 self.wwrite(f, t, m.flags(f))
1112 self.wwrite(f, t, m.flags(f))
1113 self.dirstate.normal(f)
1113 self.dirstate.normal(f)
1114 finally:
1114 finally:
1115 wlock.release()
1115 wlock.release()
1116
1116
1117 def copy(self, source, dest):
1117 def copy(self, source, dest):
1118 p = self.wjoin(dest)
1118 p = self.wjoin(dest)
1119 if not (os.path.exists(p) or os.path.islink(p)):
1119 if not (os.path.exists(p) or os.path.islink(p)):
1120 self.ui.warn(_("%s does not exist!\n") % dest)
1120 self.ui.warn(_("%s does not exist!\n") % dest)
1121 elif not (os.path.isfile(p) or os.path.islink(p)):
1121 elif not (os.path.isfile(p) or os.path.islink(p)):
1122 self.ui.warn(_("copy failed: %s is not a file or a "
1122 self.ui.warn(_("copy failed: %s is not a file or a "
1123 "symbolic link\n") % dest)
1123 "symbolic link\n") % dest)
1124 else:
1124 else:
1125 wlock = self.wlock()
1125 wlock = self.wlock()
1126 try:
1126 try:
1127 if self.dirstate[dest] in '?r':
1127 if self.dirstate[dest] in '?r':
1128 self.dirstate.add(dest)
1128 self.dirstate.add(dest)
1129 self.dirstate.copy(source, dest)
1129 self.dirstate.copy(source, dest)
1130 finally:
1130 finally:
1131 wlock.release()
1131 wlock.release()
1132
1132
1133 def heads(self, start=None, closed=True):
1133 def heads(self, start=None, closed=True):
1134 heads = self.changelog.heads(start)
1134 heads = self.changelog.heads(start)
1135 def display(head):
1135 def display(head):
1136 if closed:
1136 if closed:
1137 return True
1137 return True
1138 extras = self.changelog.read(head)[5]
1138 extras = self.changelog.read(head)[5]
1139 return ('close' not in extras)
1139 return ('close' not in extras)
1140 # sort the output in rev descending order
1140 # sort the output in rev descending order
1141 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1141 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1142 return [n for (r, n) in sorted(heads)]
1142 return [n for (r, n) in sorted(heads)]
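# Illustrative sketch (not part of the original changeset): the (-rev, node)
# pairs built above are a plain decorate-sort-undecorate pass; sorting on the
# negated revision number yields newest-first order without reverse=True.
#
#   >>> revs = {'a': 3, 'b': 11, 'c': 7}
#   >>> [n for r, n in sorted((-r, n) for n, r in revs.items())]
#   ['b', 'c', 'a']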
1143
1143
1144 def branchheads(self, branch=None, start=None, closed=True):
1144 def branchheads(self, branch=None, start=None, closed=True):
1145 if branch is None:
1145 if branch is None:
1146 branch = self[None].branch()
1146 branch = self[None].branch()
1147 branches = self._branchheads()
1147 branches = self._branchheads()
1148 if branch not in branches:
1148 if branch not in branches:
1149 return []
1149 return []
1150 bheads = branches[branch]
1150 bheads = branches[branch]
1151 # the cache returns heads ordered lowest to highest
1151 # the cache returns heads ordered lowest to highest
1152 bheads.reverse()
1152 bheads.reverse()
1153 if start is not None:
1153 if start is not None:
1154 # filter out the heads that cannot be reached from startrev
1154 # filter out the heads that cannot be reached from startrev
1155 bheads = self.changelog.nodesbetween([start], bheads)[2]
1155 bheads = self.changelog.nodesbetween([start], bheads)[2]
1156 if not closed:
1156 if not closed:
1157 bheads = [h for h in bheads if
1157 bheads = [h for h in bheads if
1158 ('close' not in self.changelog.read(h)[5])]
1158 ('close' not in self.changelog.read(h)[5])]
1159 return bheads
1159 return bheads
1160
1160
1161 def branches(self, nodes):
1161 def branches(self, nodes):
1162 if not nodes:
1162 if not nodes:
1163 nodes = [self.changelog.tip()]
1163 nodes = [self.changelog.tip()]
1164 b = []
1164 b = []
1165 for n in nodes:
1165 for n in nodes:
1166 t = n
1166 t = n
1167 while 1:
1167 while 1:
1168 p = self.changelog.parents(n)
1168 p = self.changelog.parents(n)
1169 if p[1] != nullid or p[0] == nullid:
1169 if p[1] != nullid or p[0] == nullid:
1170 b.append((t, n, p[0], p[1]))
1170 b.append((t, n, p[0], p[1]))
1171 break
1171 break
1172 n = p[0]
1172 n = p[0]
1173 return b
1173 return b
1174
1174
1175 def between(self, pairs):
1175 def between(self, pairs):
1176 r = []
1176 r = []
1177
1177
1178 for top, bottom in pairs:
1178 for top, bottom in pairs:
1179 n, l, i = top, [], 0
1179 n, l, i = top, [], 0
1180 f = 1
1180 f = 1
1181
1181
1182 while n != bottom and n != nullid:
1182 while n != bottom and n != nullid:
1183 p = self.changelog.parents(n)[0]
1183 p = self.changelog.parents(n)[0]
1184 if i == f:
1184 if i == f:
1185 l.append(n)
1185 l.append(n)
1186 f = f * 2
1186 f = f * 2
1187 n = p
1187 n = p
1188 i += 1
1188 i += 1
1189
1189
1190 r.append(l)
1190 r.append(l)
1191
1191
1192 return r
1192 return r
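# Illustrative sketch (not part of the original changeset): on a linear
# history the loop above picks ancestors at exponentially growing distances
# below 'top' (1, 2, 4, 8, ... steps down), which is what the discovery code
# in findcommonincoming later bisects over.
#
#   >>> def sampled(length):
#   ...     picks, i, f = [], 0, 1
#   ...     while i < length:
#   ...         if i == f:
#   ...             picks.append(i)
#   ...             f *= 2
#   ...         i += 1
#   ...     return picks
#   >>> sampled(20)
#   [1, 2, 4, 8, 16]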
1193
1193
1194 def findincoming(self, remote, base=None, heads=None, force=False):
1194 def findincoming(self, remote, base=None, heads=None, force=False):
1195 """Return list of roots of the subsets of missing nodes from remote
1195 """Return list of roots of the subsets of missing nodes from remote
1196
1196
1197 If base dict is specified, assume that these nodes and their parents
1197 If base dict is specified, assume that these nodes and their parents
1198 exist on the remote side and that no child of a node of base exists
1198 exist on the remote side and that no child of a node of base exists
1199 in both remote and self.
1199 in both remote and self.
1200 Furthermore, base will be updated to include the nodes that exist
1200 Furthermore, base will be updated to include the nodes that exist
1201 in both self and remote but have no children in both self and remote.
1201 in both self and remote but have no children in both self and remote.
1202 If a list of heads is specified, return only nodes which are heads
1202 If a list of heads is specified, return only nodes which are heads
1203 or ancestors of these heads.
1203 or ancestors of these heads.
1204
1204
1205 All the ancestors of base are in self and in remote.
1205 All the ancestors of base are in self and in remote.
1206 All the descendants of the list returned are missing in self.
1206 All the descendants of the list returned are missing in self.
1207 (and so we know that the rest of the nodes are missing in remote, see
1207 (and so we know that the rest of the nodes are missing in remote, see
1208 outgoing)
1208 outgoing)
1209 """
1209 """
1210 return self.findcommonincoming(remote, base, heads, force)[1]
1210 return self.findcommonincoming(remote, base, heads, force)[1]
1211
1211
1212 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1212 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1213 """Return a tuple (common, missing roots, heads) used to identify
1213 """Return a tuple (common, missing roots, heads) used to identify
1214 missing nodes from remote.
1214 missing nodes from remote.
1215
1215
1216 If base dict is specified, assume that these nodes and their parents
1216 If base dict is specified, assume that these nodes and their parents
1217 exist on the remote side and that no child of a node of base exists
1217 exist on the remote side and that no child of a node of base exists
1218 in both remote and self.
1218 in both remote and self.
1219 Furthermore, base will be updated to include the nodes that exist
1219 Furthermore, base will be updated to include the nodes that exist
1220 in both self and remote but have no children in both self and remote.
1220 in both self and remote but have no children in both self and remote.
1221 If a list of heads is specified, return only nodes which are heads
1221 If a list of heads is specified, return only nodes which are heads
1222 or ancestors of these heads.
1222 or ancestors of these heads.
1223
1223
1224 All the ancestors of base are in self and in remote.
1224 All the ancestors of base are in self and in remote.
1225 """
1225 """
1226 m = self.changelog.nodemap
1226 m = self.changelog.nodemap
1227 search = []
1227 search = []
1228 fetch = set()
1228 fetch = set()
1229 seen = set()
1229 seen = set()
1230 seenbranch = set()
1230 seenbranch = set()
1231 if base == None:
1231 if base == None:
1232 base = {}
1232 base = {}
1233
1233
1234 if not heads:
1234 if not heads:
1235 heads = remote.heads()
1235 heads = remote.heads()
1236
1236
1237 if self.changelog.tip() == nullid:
1237 if self.changelog.tip() == nullid:
1238 base[nullid] = 1
1238 base[nullid] = 1
1239 if heads != [nullid]:
1239 if heads != [nullid]:
1240 return [nullid], [nullid], list(heads)
1240 return [nullid], [nullid], list(heads)
1241 return [nullid], [], []
1241 return [nullid], [], []
1242
1242
1243 # assume we're closer to the tip than the root
1243 # assume we're closer to the tip than the root
1244 # and start by examining the heads
1244 # and start by examining the heads
1245 self.ui.status(_("searching for changes\n"))
1245 self.ui.status(_("searching for changes\n"))
1246
1246
1247 unknown = []
1247 unknown = []
1248 for h in heads:
1248 for h in heads:
1249 if h not in m:
1249 if h not in m:
1250 unknown.append(h)
1250 unknown.append(h)
1251 else:
1251 else:
1252 base[h] = 1
1252 base[h] = 1
1253
1253
1254 heads = unknown
1254 heads = unknown
1255 if not unknown:
1255 if not unknown:
1256 return base.keys(), [], []
1256 return base.keys(), [], []
1257
1257
1258 req = set(unknown)
1258 req = set(unknown)
1259 reqcnt = 0
1259 reqcnt = 0
1260
1260
1261 # search through remote branches
1261 # search through remote branches
1262 # a 'branch' here is a linear segment of history, with four parts:
1262 # a 'branch' here is a linear segment of history, with four parts:
1263 # head, root, first parent, second parent
1263 # head, root, first parent, second parent
1264 # (a branch always has two parents (or none) by definition)
1264 # (a branch always has two parents (or none) by definition)
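# Illustrative sketch (not part of the original changeset): each entry
# returned by remote.branches() is a 4-tuple of nodes,
#
#   (head, root, first parent of root, second parent of root)
#
# where head and root coincide when the segment is one changeset long, and
# the second parent is nullid unless the segment's root is a merge.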
1265 unknown = remote.branches(unknown)
1265 unknown = remote.branches(unknown)
1266 while unknown:
1266 while unknown:
1267 r = []
1267 r = []
1268 while unknown:
1268 while unknown:
1269 n = unknown.pop(0)
1269 n = unknown.pop(0)
1270 if n[0] in seen:
1270 if n[0] in seen:
1271 continue
1271 continue
1272
1272
1273 self.ui.debug(_("examining %s:%s\n")
1273 self.ui.debug(_("examining %s:%s\n")
1274 % (short(n[0]), short(n[1])))
1274 % (short(n[0]), short(n[1])))
1275 if n[0] == nullid: # found the end of the branch
1275 if n[0] == nullid: # found the end of the branch
1276 pass
1276 pass
1277 elif n in seenbranch:
1277 elif n in seenbranch:
1278 self.ui.debug(_("branch already found\n"))
1278 self.ui.debug(_("branch already found\n"))
1279 continue
1279 continue
1280 elif n[1] and n[1] in m: # do we know the base?
1280 elif n[1] and n[1] in m: # do we know the base?
1281 self.ui.debug(_("found incomplete branch %s:%s\n")
1281 self.ui.debug(_("found incomplete branch %s:%s\n")
1282 % (short(n[0]), short(n[1])))
1282 % (short(n[0]), short(n[1])))
1283 search.append(n[0:2]) # schedule branch range for scanning
1283 search.append(n[0:2]) # schedule branch range for scanning
1284 seenbranch.add(n)
1284 seenbranch.add(n)
1285 else:
1285 else:
1286 if n[1] not in seen and n[1] not in fetch:
1286 if n[1] not in seen and n[1] not in fetch:
1287 if n[2] in m and n[3] in m:
1287 if n[2] in m and n[3] in m:
1288 self.ui.debug(_("found new changeset %s\n") %
1288 self.ui.debug(_("found new changeset %s\n") %
1289 short(n[1]))
1289 short(n[1]))
1290 fetch.add(n[1]) # earliest unknown
1290 fetch.add(n[1]) # earliest unknown
1291 for p in n[2:4]:
1291 for p in n[2:4]:
1292 if p in m:
1292 if p in m:
1293 base[p] = 1 # latest known
1293 base[p] = 1 # latest known
1294
1294
1295 for p in n[2:4]:
1295 for p in n[2:4]:
1296 if p not in req and p not in m:
1296 if p not in req and p not in m:
1297 r.append(p)
1297 r.append(p)
1298 req.add(p)
1298 req.add(p)
1299 seen.add(n[0])
1299 seen.add(n[0])
1300
1300
1301 if r:
1301 if r:
1302 reqcnt += 1
1302 reqcnt += 1
1303 self.ui.debug(_("request %d: %s\n") %
1303 self.ui.debug(_("request %d: %s\n") %
1304 (reqcnt, " ".join(map(short, r))))
1304 (reqcnt, " ".join(map(short, r))))
1305 for p in xrange(0, len(r), 10):
1305 for p in xrange(0, len(r), 10):
1306 for b in remote.branches(r[p:p+10]):
1306 for b in remote.branches(r[p:p+10]):
1307 self.ui.debug(_("received %s:%s\n") %
1307 self.ui.debug(_("received %s:%s\n") %
1308 (short(b[0]), short(b[1])))
1308 (short(b[0]), short(b[1])))
1309 unknown.append(b)
1309 unknown.append(b)
1310
1310
1311 # do binary search on the branches we found
1311 # do binary search on the branches we found
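# Illustrative sketch (not part of the original changeset): the samples
# returned by remote.between() sit 1, 2, 4, ... changesets below each
# unknown head (see between() above).  Walking them until one is locally
# known narrows the unknown/known boundary; once the remaining gap is at
# most two changesets (f <= 2), the still-unknown endpoint is taken as an
# earliest missing changeset and goes straight into 'fetch'.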
1312 while search:
1312 while search:
1313 newsearch = []
1313 newsearch = []
1314 reqcnt += 1
1314 reqcnt += 1
1315 for n, l in zip(search, remote.between(search)):
1315 for n, l in zip(search, remote.between(search)):
1316 l.append(n[1])
1316 l.append(n[1])
1317 p = n[0]
1317 p = n[0]
1318 f = 1
1318 f = 1
1319 for i in l:
1319 for i in l:
1320 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1320 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1321 if i in m:
1321 if i in m:
1322 if f <= 2:
1322 if f <= 2:
1323 self.ui.debug(_("found new branch changeset %s\n") %
1323 self.ui.debug(_("found new branch changeset %s\n") %
1324 short(p))
1324 short(p))
1325 fetch.add(p)
1325 fetch.add(p)
1326 base[i] = 1
1326 base[i] = 1
1327 else:
1327 else:
1328 self.ui.debug(_("narrowed branch search to %s:%s\n")
1328 self.ui.debug(_("narrowed branch search to %s:%s\n")
1329 % (short(p), short(i)))
1329 % (short(p), short(i)))
1330 newsearch.append((p, i))
1330 newsearch.append((p, i))
1331 break
1331 break
1332 p, f = i, f * 2
1332 p, f = i, f * 2
1333 search = newsearch
1333 search = newsearch
1334
1334
1335 # sanity check our fetch list
1335 # sanity check our fetch list
1336 for f in fetch:
1336 for f in fetch:
1337 if f in m:
1337 if f in m:
1338 raise error.RepoError(_("already have changeset ")
1338 raise error.RepoError(_("already have changeset ")
1339 + short(f[:4]))
1339 + short(f[:4]))
1340
1340
1341 if base.keys() == [nullid]:
1341 if base.keys() == [nullid]:
1342 if force:
1342 if force:
1343 self.ui.warn(_("warning: repository is unrelated\n"))
1343 self.ui.warn(_("warning: repository is unrelated\n"))
1344 else:
1344 else:
1345 raise util.Abort(_("repository is unrelated"))
1345 raise util.Abort(_("repository is unrelated"))
1346
1346
1347 self.ui.debug(_("found new changesets starting at ") +
1347 self.ui.debug(_("found new changesets starting at ") +
1348 " ".join([short(f) for f in fetch]) + "\n")
1348 " ".join([short(f) for f in fetch]) + "\n")
1349
1349
1350 self.ui.debug(_("%d total queries\n") % reqcnt)
1350 self.ui.debug(_("%d total queries\n") % reqcnt)
1351
1351
1352 return base.keys(), list(fetch), heads
1352 return base.keys(), list(fetch), heads
1353
1353
1354 def findoutgoing(self, remote, base=None, heads=None, force=False):
1354 def findoutgoing(self, remote, base=None, heads=None, force=False):
1355 """Return list of nodes that are roots of subsets not in remote
1355 """Return list of nodes that are roots of subsets not in remote
1356
1356
1357 If base dict is specified, assume that these nodes and their parents
1357 If base dict is specified, assume that these nodes and their parents
1358 exist on the remote side.
1358 exist on the remote side.
1359 If a list of heads is specified, return only nodes which are heads
1359 If a list of heads is specified, return only nodes which are heads
1360 or ancestors of these heads, and return a second element which
1360 or ancestors of these heads, and return a second element which
1361 contains all remote heads which get new children.
1361 contains all remote heads which get new children.
1362 """
1362 """
1363 if base == None:
1363 if base == None:
1364 base = {}
1364 base = {}
1365 self.findincoming(remote, base, heads, force=force)
1365 self.findincoming(remote, base, heads, force=force)
1366
1366
1367 self.ui.debug(_("common changesets up to ")
1367 self.ui.debug(_("common changesets up to ")
1368 + " ".join(map(short, base.keys())) + "\n")
1368 + " ".join(map(short, base.keys())) + "\n")
1369
1369
1370 remain = set(self.changelog.nodemap)
1370 remain = set(self.changelog.nodemap)
1371
1371
1372 # prune everything remote has from the tree
1372 # prune everything remote has from the tree
1373 remain.remove(nullid)
1373 remain.remove(nullid)
1374 remove = base.keys()
1374 remove = base.keys()
1375 while remove:
1375 while remove:
1376 n = remove.pop(0)
1376 n = remove.pop(0)
1377 if n in remain:
1377 if n in remain:
1378 remain.remove(n)
1378 remain.remove(n)
1379 for p in self.changelog.parents(n):
1379 for p in self.changelog.parents(n):
1380 remove.append(p)
1380 remove.append(p)
1381
1381
1382 # find every node whose parents have been pruned
1382 # find every node whose parents have been pruned
1383 subset = []
1383 subset = []
1384 # find every remote head that will get new children
1384 # find every remote head that will get new children
1385 updated_heads = set()
1385 updated_heads = set()
1386 for n in remain:
1386 for n in remain:
1387 p1, p2 = self.changelog.parents(n)
1387 p1, p2 = self.changelog.parents(n)
1388 if p1 not in remain and p2 not in remain:
1388 if p1 not in remain and p2 not in remain:
1389 subset.append(n)
1389 subset.append(n)
1390 if heads:
1390 if heads:
1391 if p1 in heads:
1391 if p1 in heads:
1392 updated_heads.add(p1)
1392 updated_heads.add(p1)
1393 if p2 in heads:
1393 if p2 in heads:
1394 updated_heads.add(p2)
1394 updated_heads.add(p2)
1395
1395
1396 # this is the set of all roots we have to push
1396 # this is the set of all roots we have to push
1397 if heads:
1397 if heads:
1398 return subset, list(updated_heads)
1398 return subset, list(updated_heads)
1399 else:
1399 else:
1400 return subset
1400 return subset
1401
1401
1402 def pull(self, remote, heads=None, force=False):
1402 def pull(self, remote, heads=None, force=False):
1403 lock = self.lock()
1403 lock = self.lock()
1404 try:
1404 try:
1405 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1405 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1406 force=force)
1406 force=force)
1407 if fetch == [nullid]:
1407 if fetch == [nullid]:
1408 self.ui.status(_("requesting all changes\n"))
1408 self.ui.status(_("requesting all changes\n"))
1409
1409
1410 if not fetch:
1410 if not fetch:
1411 self.ui.status(_("no changes found\n"))
1411 self.ui.status(_("no changes found\n"))
1412 return 0
1412 return 0
1413
1413
1414 if heads is None and remote.capable('changegroupsubset'):
1414 if heads is None and remote.capable('changegroupsubset'):
1415 heads = rheads
1415 heads = rheads
1416
1416
1417 if heads is None:
1417 if heads is None:
1418 cg = remote.changegroup(fetch, 'pull')
1418 cg = remote.changegroup(fetch, 'pull')
1419 else:
1419 else:
1420 if not remote.capable('changegroupsubset'):
1420 if not remote.capable('changegroupsubset'):
1421 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1421 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1422 cg = remote.changegroupsubset(fetch, heads, 'pull')
1422 cg = remote.changegroupsubset(fetch, heads, 'pull')
1423 return self.addchangegroup(cg, 'pull', remote.url())
1423 return self.addchangegroup(cg, 'pull', remote.url())
1424 finally:
1424 finally:
1425 lock.release()
1425 lock.release()
1426
1426
1427 def push(self, remote, force=False, revs=None):
1427 def push(self, remote, force=False, revs=None):
1428 # there are two ways to push to remote repo:
1428 # there are two ways to push to remote repo:
1429 #
1429 #
1430 # addchangegroup assumes local user can lock remote
1430 # addchangegroup assumes local user can lock remote
1431 # repo (local filesystem, old ssh servers).
1431 # repo (local filesystem, old ssh servers).
1432 #
1432 #
1433 # unbundle assumes local user cannot lock remote repo (new ssh
1433 # unbundle assumes local user cannot lock remote repo (new ssh
1434 # servers, http servers).
1434 # servers, http servers).
1435
1435
1436 if remote.capable('unbundle'):
1436 if remote.capable('unbundle'):
1437 return self.push_unbundle(remote, force, revs)
1437 return self.push_unbundle(remote, force, revs)
1438 return self.push_addchangegroup(remote, force, revs)
1438 return self.push_addchangegroup(remote, force, revs)
1439
1439
1440 def prepush(self, remote, force, revs):
1440 def prepush(self, remote, force, revs):
1441 common = {}
1441 common = {}
1442 remote_heads = remote.heads()
1442 remote_heads = remote.heads()
1443 inc = self.findincoming(remote, common, remote_heads, force=force)
1443 inc = self.findincoming(remote, common, remote_heads, force=force)
1444
1444
1445 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1445 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1446 if revs is not None:
1446 if revs is not None:
1447 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1447 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1448 else:
1448 else:
1449 bases, heads = update, self.changelog.heads()
1449 bases, heads = update, self.changelog.heads()
1450
1450
1451 if not bases:
1451 if not bases:
1452 self.ui.status(_("no changes found\n"))
1452 self.ui.status(_("no changes found\n"))
1453 return None, 1
1453 return None, 1
1454 elif not force:
1454 elif not force:
1455 # check if we're creating new remote heads
1455 # check if we're creating new remote heads
1456 # to be a remote head after push, node must be either
1456 # to be a remote head after push, node must be either
1457 # - unknown locally
1457 # - unknown locally
1458 # - a local outgoing head descended from update
1458 # - a local outgoing head descended from update
1459 # - a remote head that's known locally and not
1459 # - a remote head that's known locally and not
1460 # ancestral to an outgoing head
1460 # ancestral to an outgoing head
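# Illustrative sketch (not part of the original changeset): if the remote
# has the single head R and the local clone committed two divergent
# children H1 and H2 of R, the checks below count two prospective remote
# heads against the one that exists now, so the push is refused with the
# "push creates new remote heads" message unless force is used.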
1461
1461
1462 warn = 0
1462 warn = 0
1463
1463
1464 if remote_heads == [nullid]:
1464 if remote_heads == [nullid]:
1465 warn = 0
1465 warn = 0
1466 elif not revs and len(heads) > len(remote_heads):
1466 elif not revs and len(heads) > len(remote_heads):
1467 warn = 1
1467 warn = 1
1468 else:
1468 else:
1469 newheads = list(heads)
1469 newheads = list(heads)
1470 for r in remote_heads:
1470 for r in remote_heads:
1471 if r in self.changelog.nodemap:
1471 if r in self.changelog.nodemap:
1472 desc = self.changelog.heads(r, heads)
1472 desc = self.changelog.heads(r, heads)
1473 l = [h for h in heads if h in desc]
1473 l = [h for h in heads if h in desc]
1474 if not l:
1474 if not l:
1475 newheads.append(r)
1475 newheads.append(r)
1476 else:
1476 else:
1477 newheads.append(r)
1477 newheads.append(r)
1478 if len(newheads) > len(remote_heads):
1478 if len(newheads) > len(remote_heads):
1479 warn = 1
1479 warn = 1
1480
1480
1481 if warn:
1481 if warn:
1482 self.ui.warn(_("abort: push creates new remote heads!\n"))
1482 self.ui.warn(_("abort: push creates new remote heads!\n"))
1483 self.ui.status(_("(did you forget to merge?"
1483 self.ui.status(_("(did you forget to merge?"
1484 " use push -f to force)\n"))
1484 " use push -f to force)\n"))
1485 return None, 0
1485 return None, 0
1486 elif inc:
1486 elif inc:
1487 self.ui.warn(_("note: unsynced remote changes!\n"))
1487 self.ui.warn(_("note: unsynced remote changes!\n"))
1488
1488
1489
1489
1490 if revs is None:
1490 if revs is None:
1491 # use the fast path, no race possible on push
1491 # use the fast path, no race possible on push
1492 cg = self._changegroup(common.keys(), 'push')
1492 cg = self._changegroup(common.keys(), 'push')
1493 else:
1493 else:
1494 cg = self.changegroupsubset(update, revs, 'push')
1494 cg = self.changegroupsubset(update, revs, 'push')
1495 return cg, remote_heads
1495 return cg, remote_heads
1496
1496
1497 def push_addchangegroup(self, remote, force, revs):
1497 def push_addchangegroup(self, remote, force, revs):
1498 lock = remote.lock()
1498 lock = remote.lock()
1499 try:
1499 try:
1500 ret = self.prepush(remote, force, revs)
1500 ret = self.prepush(remote, force, revs)
1501 if ret[0] is not None:
1501 if ret[0] is not None:
1502 cg, remote_heads = ret
1502 cg, remote_heads = ret
1503 return remote.addchangegroup(cg, 'push', self.url())
1503 return remote.addchangegroup(cg, 'push', self.url())
1504 return ret[1]
1504 return ret[1]
1505 finally:
1505 finally:
1506 lock.release()
1506 lock.release()
1507
1507
1508 def push_unbundle(self, remote, force, revs):
1508 def push_unbundle(self, remote, force, revs):
1509 # local repo finds heads on server, finds out what revs it
1509 # local repo finds heads on server, finds out what revs it
1510 # must push. once revs transferred, if server finds it has
1510 # must push. once revs transferred, if server finds it has
1511 # different heads (someone else won commit/push race), server
1511 # different heads (someone else won commit/push race), server
1512 # aborts.
1512 # aborts.
1513
1513
1514 ret = self.prepush(remote, force, revs)
1514 ret = self.prepush(remote, force, revs)
1515 if ret[0] is not None:
1515 if ret[0] is not None:
1516 cg, remote_heads = ret
1516 cg, remote_heads = ret
1517 if force: remote_heads = ['force']
1517 if force: remote_heads = ['force']
1518 return remote.unbundle(cg, remote_heads, 'push')
1518 return remote.unbundle(cg, remote_heads, 'push')
1519 return ret[1]
1519 return ret[1]
1520
1520
1521 def changegroupinfo(self, nodes, source):
1521 def changegroupinfo(self, nodes, source):
1522 if self.ui.verbose or source == 'bundle':
1522 if self.ui.verbose or source == 'bundle':
1523 self.ui.status(_("%d changesets found\n") % len(nodes))
1523 self.ui.status(_("%d changesets found\n") % len(nodes))
1524 if self.ui.debugflag:
1524 if self.ui.debugflag:
1525 self.ui.debug(_("list of changesets:\n"))
1525 self.ui.debug(_("list of changesets:\n"))
1526 for node in nodes:
1526 for node in nodes:
1527 self.ui.debug("%s\n" % hex(node))
1527 self.ui.debug("%s\n" % hex(node))
1528
1528
1529 def changegroupsubset(self, bases, heads, source, extranodes=None):
1529 def changegroupsubset(self, bases, heads, source, extranodes=None):
1530 """This function generates a changegroup consisting of all the nodes
1530 """This function generates a changegroup consisting of all the nodes
1531 that are descendants of any of the bases, and ancestors of any of
1531 that are descendants of any of the bases, and ancestors of any of
1532 the heads.
1532 the heads.
1533
1533
1534 It is fairly complex as determining which filenodes and which
1534 It is fairly complex as determining which filenodes and which
1535 manifest nodes need to be included for the changeset to be complete
1535 manifest nodes need to be included for the changeset to be complete
1536 is non-trivial.
1536 is non-trivial.
1537
1537
1538 Another wrinkle is doing the reverse, figuring out which changeset in
1538 Another wrinkle is doing the reverse, figuring out which changeset in
1539 the changegroup a particular filenode or manifestnode belongs to.
1539 the changegroup a particular filenode or manifestnode belongs to.
1540
1540
1541 The caller can specify some nodes that must be included in the
1541 The caller can specify some nodes that must be included in the
1542 changegroup using the extranodes argument. It should be a dict
1542 changegroup using the extranodes argument. It should be a dict
1543 where the keys are the filenames (or 1 for the manifest), and the
1543 where the keys are the filenames (or 1 for the manifest), and the
1544 values are lists of (node, linknode) tuples, where node is a wanted
1544 values are lists of (node, linknode) tuples, where node is a wanted
1545 node and linknode is the changelog node that should be transmitted as
1545 node and linknode is the changelog node that should be transmitted as
1546 the linkrev.
1546 the linkrev.
1547 """
1547 """
1548
1548
1549 if extranodes is None:
1549 if extranodes is None:
1550 # can we go through the fast path ?
1550 # can we go through the fast path ?
1551 heads.sort()
1551 heads.sort()
1552 allheads = self.heads()
1552 allheads = self.heads()
1553 allheads.sort()
1553 allheads.sort()
1554 if heads == allheads:
1554 if heads == allheads:
1555 common = []
1555 common = []
1556 # parents of bases are known from both sides
1556 # parents of bases are known from both sides
1557 for n in bases:
1557 for n in bases:
1558 for p in self.changelog.parents(n):
1558 for p in self.changelog.parents(n):
1559 if p != nullid:
1559 if p != nullid:
1560 common.append(p)
1560 common.append(p)
1561 return self._changegroup(common, source)
1561 return self._changegroup(common, source)
1562
1562
1563 self.hook('preoutgoing', throw=True, source=source)
1563 self.hook('preoutgoing', throw=True, source=source)
1564
1564
1565 # Set up some initial variables
1565 # Set up some initial variables
1566 # Make it easy to refer to self.changelog
1566 # Make it easy to refer to self.changelog
1567 cl = self.changelog
1567 cl = self.changelog
1568 # msng is short for missing - compute the list of changesets in this
1568 # msng is short for missing - compute the list of changesets in this
1569 # changegroup.
1569 # changegroup.
1570 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1570 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1571 self.changegroupinfo(msng_cl_lst, source)
1571 self.changegroupinfo(msng_cl_lst, source)
1572 # Some bases may turn out to be superfluous, and some heads may be
1572 # Some bases may turn out to be superfluous, and some heads may be
1573 # too. nodesbetween will return the minimal set of bases and heads
1573 # too. nodesbetween will return the minimal set of bases and heads
1574 # necessary to re-create the changegroup.
1574 # necessary to re-create the changegroup.
1575
1575
1576 # Known heads are the list of heads that it is assumed the recipient
1576 # Known heads are the list of heads that it is assumed the recipient
1577 # of this changegroup will know about.
1577 # of this changegroup will know about.
1578 knownheads = set()
1578 knownheads = set()
1579 # We assume that all parents of bases are known heads.
1579 # We assume that all parents of bases are known heads.
1580 for n in bases:
1580 for n in bases:
1581 for p in cl.parents(n):
1581 knownheads.update(cl.parents(n))
1582 if p != nullid:
1582 knownheads.discard(nullid)
1583 knownheads.add(p)
1584 knownheads = list(knownheads)
1583 knownheads = list(knownheads)
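# Illustrative sketch (not part of the original file): the bulk update above
# does the same work as the removed per-parent loop, with nullid parents
# dropped afterwards by discard().  Strings stand in for node ids here.
#
#   >>> old = set()
#   >>> for p in ('p1', 'null'):
#   ...     if p != 'null':
#   ...         old.add(p)
#   >>> new = set()
#   >>> new.update(('p1', 'null'))
#   >>> new.discard('null')
#   >>> new == old
#   True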
1585 if knownheads:
1584 if knownheads:
1586 # Now that we know what heads are known, we can compute which
1585 # Now that we know what heads are known, we can compute which
1587 # changesets are known. The recipient must know about all
1586 # changesets are known. The recipient must know about all
1588 # changesets required to reach the known heads from the null
1587 # changesets required to reach the known heads from the null
1589 # changeset.
1588 # changeset.
1590 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1589 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1591 junk = None
1590 junk = None
1592 # Transform the list into a set.
1591 # Transform the list into a set.
1593 has_cl_set = set(has_cl_set)
1592 has_cl_set = set(has_cl_set)
1594 else:
1593 else:
1595 # If there were no known heads, the recipient cannot be assumed to
1594 # If there were no known heads, the recipient cannot be assumed to
1596 # know about any changesets.
1595 # know about any changesets.
1597 has_cl_set = set()
1596 has_cl_set = set()
1598
1597
1599 # Make it easy to refer to self.manifest
1598 # Make it easy to refer to self.manifest
1600 mnfst = self.manifest
1599 mnfst = self.manifest
1601 # We don't know which manifests are missing yet
1600 # We don't know which manifests are missing yet
1602 msng_mnfst_set = {}
1601 msng_mnfst_set = {}
1603 # Nor do we know which filenodes are missing.
1602 # Nor do we know which filenodes are missing.
1604 msng_filenode_set = {}
1603 msng_filenode_set = {}
1605
1604
1606 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1605 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1607 junk = None
1606 junk = None
1608
1607
1609 # A changeset always belongs to itself, so the changenode lookup
1608 # A changeset always belongs to itself, so the changenode lookup
1610 # function for a changenode is identity.
1609 # function for a changenode is identity.
1611 def identity(x):
1610 def identity(x):
1612 return x
1611 return x
1613
1612
1614 # A function generating function. Sets up an environment for the
1613 # A function generating function. Sets up an environment for the
1615 # inner function.
1614 # inner function.
1616 def cmp_by_rev_func(revlog):
1615 def cmp_by_rev_func(revlog):
1617 # Compare two nodes by their revision number in the environment's
1616 # Compare two nodes by their revision number in the environment's
1618 # revision history. Since the revision number both represents the
1617 # revision history. Since the revision number both represents the
1619 # most efficient order to read the nodes in, and represents a
1618 # most efficient order to read the nodes in, and represents a
1620 # topological sorting of the nodes, this function is often useful.
1619 # topological sorting of the nodes, this function is often useful.
1621 def cmp_by_rev(a, b):
1620 def cmp_by_rev(a, b):
1622 return cmp(revlog.rev(a), revlog.rev(b))
1621 return cmp(revlog.rev(a), revlog.rev(b))
1623 return cmp_by_rev
1622 return cmp_by_rev
1624
1623
1625 # If we determine that a particular file or manifest node must be a
1624 # If we determine that a particular file or manifest node must be a
1626 # node that the recipient of the changegroup will already have, we can
1625 # node that the recipient of the changegroup will already have, we can
1627 # also assume the recipient will have all the parents. This function
1626 # also assume the recipient will have all the parents. This function
1628 # prunes them from the set of missing nodes.
1627 # prunes them from the set of missing nodes.
1629 def prune_parents(revlog, hasset, msngset):
1628 def prune_parents(revlog, hasset, msngset):
1630 haslst = list(hasset)
1629 haslst = list(hasset)
1631 haslst.sort(cmp_by_rev_func(revlog))
1630 haslst.sort(cmp_by_rev_func(revlog))
1632 for node in haslst:
1631 for node in haslst:
1633 parentlst = [p for p in revlog.parents(node) if p != nullid]
1632 parentlst = [p for p in revlog.parents(node) if p != nullid]
1634 while parentlst:
1633 while parentlst:
1635 n = parentlst.pop()
1634 n = parentlst.pop()
1636 if n not in hasset:
1635 if n not in hasset:
1637 hasset.add(n)
1636 hasset.add(n)
1638 p = [p for p in revlog.parents(n) if p != nullid]
1637 p = [p for p in revlog.parents(n) if p != nullid]
1639 parentlst.extend(p)
1638 parentlst.extend(p)
1640 for n in hasset:
1639 for n in hasset:
1641 msngset.pop(n, None)
1640 msngset.pop(n, None)
1642
1641
1643 # This is a function generating function used to set up an environment
1642 # This is a function generating function used to set up an environment
1644 # for the inner function to execute in.
1643 # for the inner function to execute in.
1645 def manifest_and_file_collector(changedfileset):
1644 def manifest_and_file_collector(changedfileset):
1646 # This is an information gathering function that gathers
1645 # This is an information gathering function that gathers
1647 # information from each changeset node that goes out as part of
1646 # information from each changeset node that goes out as part of
1648 # the changegroup. The information gathered is a list of which
1647 # the changegroup. The information gathered is a list of which
1649 # manifest nodes are potentially required (the recipient may
1648 # manifest nodes are potentially required (the recipient may
1650 # already have them) and the total list of all files which were
1649 # already have them) and the total list of all files which were
1651 # changed in any changeset in the changegroup.
1650 # changed in any changeset in the changegroup.
1652 #
1651 #
1653 # We also remember the first changenode we saw any manifest
1652 # We also remember the first changenode we saw any manifest
1654 # referenced by so we can later determine which changenode 'owns'
1653 # referenced by so we can later determine which changenode 'owns'
1655 # the manifest.
1654 # the manifest.
1656 def collect_manifests_and_files(clnode):
1655 def collect_manifests_and_files(clnode):
1657 c = cl.read(clnode)
1656 c = cl.read(clnode)
1658 for f in c[3]:
1657 for f in c[3]:
1659 # This is to make sure we only have one instance of each
1658 # This is to make sure we only have one instance of each
1660 # filename string for each filename.
1659 # filename string for each filename.
1661 changedfileset.setdefault(f, f)
1660 changedfileset.setdefault(f, f)
1662 msng_mnfst_set.setdefault(c[0], clnode)
1661 msng_mnfst_set.setdefault(c[0], clnode)
1663 return collect_manifests_and_files
1662 return collect_manifests_and_files
1664
1663
1665 # Figure out which manifest nodes (of the ones we think might be part
1664 # Figure out which manifest nodes (of the ones we think might be part
1666 # of the changegroup) the recipient must know about and remove them
1665 # of the changegroup) the recipient must know about and remove them
1667 # from the changegroup.
1666 # from the changegroup.
1668 def prune_manifests():
1667 def prune_manifests():
1669 has_mnfst_set = set()
1668 has_mnfst_set = set()
1670 for n in msng_mnfst_set:
1669 for n in msng_mnfst_set:
1671 # If a 'missing' manifest thinks it belongs to a changenode
1670 # If a 'missing' manifest thinks it belongs to a changenode
1672 # the recipient is assumed to have, obviously the recipient
1671 # the recipient is assumed to have, obviously the recipient
1673 # must have that manifest.
1672 # must have that manifest.
1674 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1673 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1675 if linknode in has_cl_set:
1674 if linknode in has_cl_set:
1676 has_mnfst_set.add(n)
1675 has_mnfst_set.add(n)
1677 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1676 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1678
1677
1679 # Use the information collected in collect_manifests_and_files to say
1678 # Use the information collected in collect_manifests_and_files to say
1680 # which changenode any manifestnode belongs to.
1679 # which changenode any manifestnode belongs to.
1681 def lookup_manifest_link(mnfstnode):
1680 def lookup_manifest_link(mnfstnode):
1682 return msng_mnfst_set[mnfstnode]
1681 return msng_mnfst_set[mnfstnode]
1683
1682
1684 # A function generating function that sets up the initial environment
1683 # A function generating function that sets up the initial environment
1685 # for the inner function.
1684 # for the inner function.
1686 def filenode_collector(changedfiles):
1685 def filenode_collector(changedfiles):
1687 next_rev = [0]
1686 next_rev = [0]
1688 # This gathers information from each manifestnode included in the
1687 # This gathers information from each manifestnode included in the
1689 # changegroup about which filenodes the manifest node references
1688 # changegroup about which filenodes the manifest node references
1690 # so we can include those in the changegroup too.
1689 # so we can include those in the changegroup too.
1691 #
1690 #
1692 # It also remembers which changenode each filenode belongs to. It
1691 # It also remembers which changenode each filenode belongs to. It
1693 # does this by assuming that a filenode belongs to the changenode
1692 # does this by assuming that a filenode belongs to the changenode
1694 # that the first manifest referencing it belongs to.
1693 # that the first manifest referencing it belongs to.
1695 def collect_msng_filenodes(mnfstnode):
1694 def collect_msng_filenodes(mnfstnode):
1696 r = mnfst.rev(mnfstnode)
1695 r = mnfst.rev(mnfstnode)
1697 if r == next_rev[0]:
1696 if r == next_rev[0]:
1698 # If the last rev we looked at was the one just previous,
1697 # If the last rev we looked at was the one just previous,
1699 # we only need to see a diff.
1698 # we only need to see a diff.
1700 deltamf = mnfst.readdelta(mnfstnode)
1699 deltamf = mnfst.readdelta(mnfstnode)
1701 # For each line in the delta
1700 # For each line in the delta
1702 for f, fnode in deltamf.iteritems():
1701 for f, fnode in deltamf.iteritems():
1703 f = changedfiles.get(f, None)
1702 f = changedfiles.get(f, None)
1704 # And if the file is in the list of files we care
1703 # And if the file is in the list of files we care
1705 # about.
1704 # about.
1706 if f is not None:
1705 if f is not None:
1707 # Get the changenode this manifest belongs to
1706 # Get the changenode this manifest belongs to
1708 clnode = msng_mnfst_set[mnfstnode]
1707 clnode = msng_mnfst_set[mnfstnode]
1709 # Create the set of filenodes for the file if
1708 # Create the set of filenodes for the file if
1710 # there isn't one already.
1709 # there isn't one already.
1711 ndset = msng_filenode_set.setdefault(f, {})
1710 ndset = msng_filenode_set.setdefault(f, {})
1712 # And set the filenode's changelog node to the
1711 # And set the filenode's changelog node to the
1713 # manifest's if it hasn't been set already.
1712 # manifest's if it hasn't been set already.
1714 ndset.setdefault(fnode, clnode)
1713 ndset.setdefault(fnode, clnode)
1715 else:
1714 else:
1716 # Otherwise we need a full manifest.
1715 # Otherwise we need a full manifest.
1717 m = mnfst.read(mnfstnode)
1716 m = mnfst.read(mnfstnode)
1718 # For every file we care about.
1717 # For every file we care about.
1719 for f in changedfiles:
1718 for f in changedfiles:
1720 fnode = m.get(f, None)
1719 fnode = m.get(f, None)
1721 # If it's in the manifest
1720 # If it's in the manifest
1722 if fnode is not None:
1721 if fnode is not None:
1723 # See comments above.
1722 # See comments above.
1724 clnode = msng_mnfst_set[mnfstnode]
1723 clnode = msng_mnfst_set[mnfstnode]
1725 ndset = msng_filenode_set.setdefault(f, {})
1724 ndset = msng_filenode_set.setdefault(f, {})
1726 ndset.setdefault(fnode, clnode)
1725 ndset.setdefault(fnode, clnode)
1727 # Remember the revision we hope to see next.
1726 # Remember the revision we hope to see next.
1728 next_rev[0] = r + 1
1727 next_rev[0] = r + 1
1729 return collect_msng_filenodes
1728 return collect_msng_filenodes
1730
1729
1731 # We have a list of filenodes we think we need for a file, lets remove
1730 # We have a list of filenodes we think we need for a file, lets remove
1732 # all those we know the recipient must have.
1731 # all those we know the recipient must have.
1733 def prune_filenodes(f, filerevlog):
1732 def prune_filenodes(f, filerevlog):
1734 msngset = msng_filenode_set[f]
1733 msngset = msng_filenode_set[f]
1735 hasset = set()
1734 hasset = set()
1736 # If a 'missing' filenode thinks it belongs to a changenode we
1735 # If a 'missing' filenode thinks it belongs to a changenode we
1737 # assume the recipient must have, then the recipient must have
1736 # assume the recipient must have, then the recipient must have
1738 # that filenode.
1737 # that filenode.
1739 for n in msngset:
1738 for n in msngset:
1740 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1739 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1741 if clnode in has_cl_set:
1740 if clnode in has_cl_set:
1742 hasset.add(n)
1741 hasset.add(n)
1743 prune_parents(filerevlog, hasset, msngset)
1742 prune_parents(filerevlog, hasset, msngset)
1744
1743
1745 # A function generator function that sets up a context for the
1744 # A function generator function that sets up a context for the
1746 # inner function.
1745 # inner function.
1747 def lookup_filenode_link_func(fname):
1746 def lookup_filenode_link_func(fname):
1748 msngset = msng_filenode_set[fname]
1747 msngset = msng_filenode_set[fname]
1749 # Lookup the changenode the filenode belongs to.
1748 # Lookup the changenode the filenode belongs to.
1750 def lookup_filenode_link(fnode):
1749 def lookup_filenode_link(fnode):
1751 return msngset[fnode]
1750 return msngset[fnode]
1752 return lookup_filenode_link
1751 return lookup_filenode_link
1753
1752
1754 # Add the nodes that were explicitly requested.
1753 # Add the nodes that were explicitly requested.
1755 def add_extra_nodes(name, nodes):
1754 def add_extra_nodes(name, nodes):
1756 if not extranodes or name not in extranodes:
1755 if not extranodes or name not in extranodes:
1757 return
1756 return
1758
1757
1759 for node, linknode in extranodes[name]:
1758 for node, linknode in extranodes[name]:
1760 if node not in nodes:
1759 if node not in nodes:
1761 nodes[node] = linknode
1760 nodes[node] = linknode
1762
1761
1763 # Now that we have all these utility functions to help out and
1762 # Now that we have all these utility functions to help out and
1764 # logically divide up the task, generate the group.
1763 # logically divide up the task, generate the group.
1765 def gengroup():
1764 def gengroup():
1766 # The set of changed files starts empty.
1765 # The set of changed files starts empty.
1767 changedfiles = {}
1766 changedfiles = {}
1768 # Create a changenode group generator that will call our functions
1767 # Create a changenode group generator that will call our functions
1769 # back to lookup the owning changenode and collect information.
1768 # back to lookup the owning changenode and collect information.
1770 group = cl.group(msng_cl_lst, identity,
1769 group = cl.group(msng_cl_lst, identity,
1771 manifest_and_file_collector(changedfiles))
1770 manifest_and_file_collector(changedfiles))
1772 for chnk in group:
1771 for chnk in group:
1773 yield chnk
1772 yield chnk
1774
1773
1775 # The list of manifests has been collected by the generator
1774 # The list of manifests has been collected by the generator
1776 # calling our functions back.
1775 # calling our functions back.
1777 prune_manifests()
1776 prune_manifests()
1778 add_extra_nodes(1, msng_mnfst_set)
1777 add_extra_nodes(1, msng_mnfst_set)
1779 msng_mnfst_lst = msng_mnfst_set.keys()
1778 msng_mnfst_lst = msng_mnfst_set.keys()
1780 # Sort the manifestnodes by revision number.
1779 # Sort the manifestnodes by revision number.
1781 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1780 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1782 # Create a generator for the manifestnodes that calls our lookup
1781 # Create a generator for the manifestnodes that calls our lookup
1783 # and data collection functions back.
1782 # and data collection functions back.
1784 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1783 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1785 filenode_collector(changedfiles))
                                 filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
-                for fname in c[3]:
-                    changedfileset.add(fname)
+                changedfileset.update(c[3])
            return collect_changed_files
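This hunk is the substance of the changeset: collect_changed_files now hands the whole list of changed files (c[3]) to set.update() instead of add()-ing each name in a Python-level loop. Both forms build the same set; update() simply performs the iteration in one call. A minimal standalone sketch of the equivalence (the file names are invented for illustration, nothing here is part of localrepo.py):

    # Standalone illustration only.
    changed = ['a.txt', 'b.txt', 'a.txt']      # stand-in for c[3]

    looped = set()
    for fname in changed:                      # old style: one add() per name
        looped.add(fname)

    bulk = set()
    bulk.update(changed)                       # new style: one bulk update()

    assert looped == bulk == set(['a.txt', 'b.txt'])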

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

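The signed return value packs the head-count change described in the docstring above into a single integer. A hypothetical caller-side sketch (the helper name is invented, not a Mercurial API) of how that encoding can be decoded:

    # Invented helper: decode addchangegroup()'s documented return value.
    def describe_addchangegroup_result(ret):
        if ret == 0:
            return 'nothing changed or no source'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        if ret < 0:
            return '%d head(s) removed' % (-ret - 1)
        return 'head count unchanged'          # ret == 1

    assert describe_addchangegroup_result(3) == '2 head(s) added'
    assert describe_addchangegroup_result(-2) == '1 head(s) removed'
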
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

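As read from the code above, stream_in() consumes a small line-oriented protocol: a numeric status line, a "<file count> <byte count>" line, and then, per file, a "name\0size" header followed by exactly size bytes of raw store data. A rough test-fixture sketch of producing a payload in that shape (the helper and file names are invented, and real stream data would be raw bytes rather than the text used here):

    # Invented test helper, not part of Mercurial.
    def fake_stream_payload(entries):
        body = ''.join(data for _, data in entries)
        lines = ['0\n',                                    # status: OK
                 '%d %d\n' % (len(entries), len(body))]    # files, total bytes
        for name, data in entries:
            lines.append('%s\0%d\n' % (name, len(data)))   # per-file header
            lines.append(data)                             # raw file data
        return ''.join(lines)

    payload = fake_stream_payload([('data/foo.i', 'xxxx')])
    assert payload.splitlines()[0] == '0'
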
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

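aftertrans() above returns a closure over a snapshot of the rename list, so the renames can run later without the transaction keeping a reference back to the repository. A self-contained sketch of the same pattern using plain os.rename (the temporary file names are invented; this is not Mercurial code):

    import os, tempfile

    tmpdir = tempfile.mkdtemp()
    journal = os.path.join(tmpdir, 'journal')
    undo = os.path.join(tmpdir, 'undo')
    open(journal, 'w').close()

    def make_after(files):
        renamefiles = [tuple(t) for t in files]     # snapshot taken up front
        def a():
            for src, dest in renamefiles:
                os.rename(src, dest)
        return a

    after = make_after([(journal, undo)])
    # ... transaction work would happen here ...
    after()                                         # renames run afterwards
    assert os.path.exists(undo) and not os.path.exists(journal)
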
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True