don't create the .hg/data at init time
Benoit Boissinot
r3713:8ae88ed2 default
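In outline: creating a repository used to pre-make the (empty) .hg/data directory, which in turn forced the hardlink/copy branch of clone() to rmdir it before util.copyfiles could put the source's data directory in its place. After this change, init creates only .hg itself and clone treats "data" like any other store file. A minimal sketch of the init step after the change (standalone Python 2; init_repo is a hypothetical name for what localrepository.__init__ does with create=1, and the lazy-creation remark assumes the store opener makes missing directories on write):

    import os

    def init_repo(path):
        # hypothetical stand-in for localrepository.__init__(..., create=1)
        if not os.path.exists(path):
            os.mkdir(path)
        os.mkdir(os.path.join(path, ".hg"))
        # before this change, init also ran:
        #     os.mkdir(os.path.join(path, ".hg", "data"))
        # now .hg/data appears lazily, once something writes to the store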
@@ -1,257 +1,256 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path='', create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106
107 107 def localpath(path):
108 108 if path.startswith('file://'):
109 109 return path[7:]
110 110 if path.startswith('file:'):
111 111 return path[5:]
112 112 return path
113 113
114 114 dest = localpath(dest)
115 115 source = localpath(source)
116 116
117 117 if os.path.exists(dest):
118 118 raise util.Abort(_("destination '%s' already exists") % dest)
119 119
120 120 class DirCleanup(object):
121 121 def __init__(self, dir_):
122 122 self.rmtree = shutil.rmtree
123 123 self.dir_ = dir_
124 124 def close(self):
125 125 self.dir_ = None
126 126 def __del__(self):
127 127 if self.dir_:
128 128 self.rmtree(self.dir_, True)
129 129
130 130 dest_repo = repository(ui, dest, create=True)
131 131
132 132 dest_path = None
133 133 dir_cleanup = None
134 134 if dest_repo.local():
135 135 dest_path = os.path.realpath(dest_repo.root)
136 136 dir_cleanup = DirCleanup(dest_path)
137 137
138 138 abspath = source
139 139 copy = False
140 140 if src_repo.local() and dest_repo.local():
141 141 abspath = os.path.abspath(source)
142 142 copy = not pull and not rev
143 143
144 144 src_lock, dest_lock = None, None
145 145 if copy:
146 146 try:
147 147 # we use a lock here because if we race with commit, we
148 148 # can end up with extra data in the cloned revlogs that's
149 149 # not pointed to by changesets, thus causing verify to
150 150 # fail
151 151 src_lock = src_repo.lock()
152 152 except lock.LockException:
153 153 copy = False
154 154
155 155 if copy:
156 156 # we lock here to avoid premature writing to the target
157 157 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
158 158
159 # we need to remove the (empty) data dir in dest so copyfiles
160 # can do its work
161 os.rmdir(os.path.join(dest_path, ".hg", "data"))
162 files = "data 00manifest.d 00manifest.i 00changelog.d 00changelog.i"
163 for f in files.split():
159 files = ("data",
160 "00manifest.d", "00manifest.i",
161 "00changelog.d", "00changelog.i")
162 for f in files:
164 163 src = os.path.join(source, ".hg", f)
165 164 dst = os.path.join(dest_path, ".hg", f)
166 165 try:
167 166 util.copyfiles(src, dst)
168 167 except OSError, inst:
169 168 if inst.errno != errno.ENOENT:
170 169 raise
171 170
172 171 # we need to re-init the repo after manually copying the data
173 172 # into it
174 173 dest_repo = repository(ui, dest)
175 174
176 175 else:
177 176 revs = None
178 177 if rev:
179 178 if 'lookup' not in src_repo.capabilities:
180 179 raise util.Abort(_("src repository does not support revision "
181 180 "lookup and so doesn't support clone by "
182 181 "revision"))
183 182 revs = [src_repo.lookup(r) for r in rev]
184 183
185 184 if dest_repo.local():
186 185 dest_repo.clone(src_repo, heads=revs, stream=stream)
187 186 elif src_repo.local():
188 187 src_repo.push(dest_repo, revs=revs)
189 188 else:
190 189 raise util.Abort(_("clone from remote to remote not supported"))
191 190
192 191 if src_lock:
193 192 src_lock.release()
194 193
195 194 if dest_repo.local():
196 195 fp = dest_repo.opener("hgrc", "w", text=True)
197 196 fp.write("[paths]\n")
198 197 fp.write("default = %s\n" % abspath)
199 198 fp.close()
200 199
201 200 if dest_lock:
202 201 dest_lock.release()
203 202
204 203 if update:
205 204 _update(dest_repo, dest_repo.changelog.tip())
206 205 if dir_cleanup:
207 206 dir_cleanup.close()
208 207
209 208 return src_repo, dest_repo
210 209
211 210 def _showstats(repo, stats):
212 211 stats = ((stats[0], _("updated")),
213 212 (stats[1], _("merged")),
214 213 (stats[2], _("removed")),
215 214 (stats[3], _("unresolved")))
216 215 note = ", ".join([_("%d files %s") % s for s in stats])
217 216 repo.ui.status("%s\n" % note)
218 217
219 218 def _update(repo, node): return update(repo, node)
220 219
221 220 def update(repo, node):
222 221 """update the working directory to node, merging linear changes"""
223 222 stats = _merge.update(repo, node, False, False, None, None)
224 223 _showstats(repo, stats)
225 224 if stats[3]:
226 225 repo.ui.status(_("There are unresolved merges with"
227 226 " locally modified files.\n"))
228 227 return stats[3]
229 228
230 229 def clean(repo, node, wlock=None, show_stats=True):
231 230 """forcibly switch the working directory to node, clobbering changes"""
232 231 stats = _merge.update(repo, node, False, True, None, wlock)
233 232 if show_stats: _showstats(repo, stats)
234 233 return stats[3]
235 234
236 235 def merge(repo, node, force=None, remind=True, wlock=None):
237 236 """branch merge with node, resolving changes"""
238 237 stats = _merge.update(repo, node, True, force, False, wlock)
239 238 _showstats(repo, stats)
240 239 if stats[3]:
241 240 pl = repo.parents()
242 241 repo.ui.status(_("There are unresolved merges,"
243 242 " you can redo the full merge using:\n"
244 243 " hg update -C %s\n"
245 244 " hg merge %s\n")
246 245 % (pl[0].rev(), pl[1].rev()))
247 246 elif remind:
248 247 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
249 248 return stats[3]
250 249
251 250 def revert(repo, node, choose, wlock):
252 251 """revert changes to revision in node without updating dirstate"""
253 252 return _merge.update(repo, node, False, True, choose, wlock)[3]
254 253
255 254 def verify(repo):
256 255 """verify the consistency of a repository"""
257 256 return _verify.verify(repo)
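One consequence of the hg.py hunk above, before the localrepo.py half of the change below: .hg/data is no longer guaranteed to exist (a freshly initialized source repository with no commits won't have it), so the hardlink/copy branch of clone() folds "data" into the ordinary file list and relies on the existing ENOENT guard to skip whatever is missing. A rough standalone sketch of that copy step (copy_store is a hypothetical name; plain shutil stands in for Mercurial's util.copyfiles):

    import errno
    import os
    import shutil

    def copy_store(source, dest_path):
        # "data" is just another optional store entry now; anything
        # absent from the source (e.g. a repo with no commits) is skipped
        files = ("data",
                 "00manifest.d", "00manifest.i",
                 "00changelog.d", "00changelog.i")
        for f in files:
            src = os.path.join(source, ".hg", f)
            dst = os.path.join(dest_path, ".hg", f)
            try:
                if os.path.isdir(src):
                    shutil.copytree(src, dst)
                else:
                    shutil.copy(src, dst)
            except (OSError, IOError), inst:
                if inst.errno != errno.ENOENT:
                    raise

As in the diff, the destination repository object is re-created afterwards (dest_repo = repository(ui, dest)) because the store was populated behind the existing object's back.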
@@ -1,1896 +1,1895 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
41 40 else:
42 41 raise repo.RepoError(_("repository %s not found") % path)
43 42 elif create:
44 43 raise repo.RepoError(_("repository %s already exists") % path)
45 44
46 45 self.root = os.path.realpath(path)
47 46 self.origroot = path
48 47 self.ui = ui.ui(parentui=parentui)
49 48 self.opener = util.opener(self.path)
50 49 self.sopener = util.opener(self.path)
51 50 self.wopener = util.opener(self.root)
52 51
53 52 try:
54 53 self.ui.readconfig(self.join("hgrc"), self.root)
55 54 except IOError:
56 55 pass
57 56
58 57 v = self.ui.configrevlog()
59 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 60 fl = v.get('flags', None)
62 61 flags = 0
63 62 if fl != None:
64 63 for x in fl.split():
65 64 flags |= revlog.flagstr(x)
66 65 elif self.revlogv1:
67 66 flags = revlog.REVLOG_DEFAULT_FLAGS
68 67
69 68 v = self.revlogversion | flags
70 69 self.manifest = manifest.manifest(self.sopener, v)
71 70 self.changelog = changelog.changelog(self.sopener, v)
72 71
73 72 # the changelog might not have the inline index flag
74 73 # on. If the format of the changelog is the same as found in
75 74 # .hgrc, apply any flags found in the .hgrc as well.
76 75 # Otherwise, just version from the changelog
77 76 v = self.changelog.version
78 77 if v == self.revlogversion:
79 78 v |= flags
80 79 self.revlogversion = v
81 80
82 81 self.tagscache = None
83 82 self.branchcache = None
84 83 self.nodetagscache = None
85 84 self.encodepats = None
86 85 self.decodepats = None
87 86 self.transhandle = None
88 87
89 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90 89
91 90 def url(self):
92 91 return 'file:' + self.root
93 92
94 93 def hook(self, name, throw=False, **args):
95 94 def callhook(hname, funcname):
96 95 '''call python hook. hook is callable object, looked up as
97 96 name in python module. if callable returns "true", hook
98 97 fails, else passes. if hook raises exception, treated as
99 98 hook failure. exception propagates if throw is "true".
100 99
101 100 reason for "true" meaning "hook failed" is so that
102 101 unmodified commands (e.g. mercurial.commands.update) can
103 102 be run as hooks without wrappers to convert return values.'''
104 103
105 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 105 d = funcname.rfind('.')
107 106 if d == -1:
108 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 108 % (hname, funcname))
110 109 modname = funcname[:d]
111 110 try:
112 111 obj = __import__(modname)
113 112 except ImportError:
114 113 try:
115 114 # extensions are loaded with hgext_ prefix
116 115 obj = __import__("hgext_%s" % modname)
117 116 except ImportError:
118 117 raise util.Abort(_('%s hook is invalid '
119 118 '(import of "%s" failed)') %
120 119 (hname, modname))
121 120 try:
122 121 for p in funcname.split('.')[1:]:
123 122 obj = getattr(obj, p)
124 123 except AttributeError, err:
125 124 raise util.Abort(_('%s hook is invalid '
126 125 '("%s" is not defined)') %
127 126 (hname, funcname))
128 127 if not callable(obj):
129 128 raise util.Abort(_('%s hook is invalid '
130 129 '("%s" is not callable)') %
131 130 (hname, funcname))
132 131 try:
133 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 133 except (KeyboardInterrupt, util.SignalInterrupt):
135 134 raise
136 135 except Exception, exc:
137 136 if isinstance(exc, util.Abort):
138 137 self.ui.warn(_('error: %s hook failed: %s\n') %
139 138 (hname, exc.args[0]))
140 139 else:
141 140 self.ui.warn(_('error: %s hook raised an exception: '
142 141 '%s\n') % (hname, exc))
143 142 if throw:
144 143 raise
145 144 self.ui.print_exc()
146 145 return True
147 146 if r:
148 147 if throw:
149 148 raise util.Abort(_('%s hook failed') % hname)
150 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 150 return r
152 151
153 152 def runhook(name, cmd):
154 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 155 r = util.system(cmd, environ=env, cwd=self.root)
157 156 if r:
158 157 desc, r = util.explain_exit(r)
159 158 if throw:
160 159 raise util.Abort(_('%s hook %s') % (name, desc))
161 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 161 return r
163 162
164 163 r = False
165 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 165 if hname.split(".", 1)[0] == name and cmd]
167 166 hooks.sort()
168 167 for hname, cmd in hooks:
169 168 if cmd.startswith('python:'):
170 169 r = callhook(hname, cmd[7:].strip()) or r
171 170 else:
172 171 r = runhook(hname, cmd) or r
173 172 return r
174 173
175 174 tag_disallowed = ':\r\n'
176 175
177 176 def tag(self, name, node, message, local, user, date):
178 177 '''tag a revision with a symbolic name.
179 178
180 179 if local is True, the tag is stored in a per-repository file.
181 180 otherwise, it is stored in the .hgtags file, and a new
182 181 changeset is committed with the change.
183 182
184 183 keyword arguments:
185 184
186 185 local: whether to store tag in non-version-controlled file
187 186 (default False)
188 187
189 188 message: commit message to use if committing
190 189
191 190 user: name of user to use if committing
192 191
193 192 date: date tuple to use if committing'''
194 193
195 194 for c in self.tag_disallowed:
196 195 if c in name:
197 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
198 197
199 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200 199
201 200 if local:
202 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 202 self.hook('tag', node=hex(node), tag=name, local=local)
204 203 return
205 204
206 205 for x in self.status()[:5]:
207 206 if '.hgtags' in x:
208 207 raise util.Abort(_('working copy of .hgtags is changed '
209 208 '(please commit .hgtags manually)'))
210 209
211 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 211 if self.dirstate.state('.hgtags') == '?':
213 212 self.add(['.hgtags'])
214 213
215 214 self.commit(['.hgtags'], message, user, date)
216 215 self.hook('tag', node=hex(node), tag=name, local=local)
217 216
218 217 def tags(self):
219 218 '''return a mapping of tag to node'''
220 219 if not self.tagscache:
221 220 self.tagscache = {}
222 221
223 222 def parsetag(line, context):
224 223 if not line:
225 224 return
226 225 s = line.split(" ", 1)
227 226 if len(s) != 2:
228 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 228 return
230 229 node, key = s
231 230 key = key.strip()
232 231 try:
233 232 bin_n = bin(node)
234 233 except TypeError:
235 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 235 (context, node))
237 236 return
238 237 if bin_n not in self.changelog.nodemap:
239 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 239 (context, key))
241 240 return
242 241 self.tagscache[key] = bin_n
243 242
244 243 # read the tags file from each head, ending with the tip,
245 244 # and add each tag found to the map, with "newer" ones
246 245 # taking precedence
247 246 f = None
248 247 for rev, node, fnode in self._hgtagsnodes():
249 248 f = (f and f.filectx(fnode) or
250 249 self.filectx('.hgtags', fileid=fnode))
251 250 count = 0
252 251 for l in f.data().splitlines():
253 252 count += 1
254 253 parsetag(l, _("%s, line %d") % (str(f), count))
255 254
256 255 try:
257 256 f = self.opener("localtags")
258 257 count = 0
259 258 for l in f:
260 259 count += 1
261 260 parsetag(l, _("localtags, line %d") % count)
262 261 except IOError:
263 262 pass
264 263
265 264 self.tagscache['tip'] = self.changelog.tip()
266 265
267 266 return self.tagscache
268 267
269 268 def _hgtagsnodes(self):
270 269 heads = self.heads()
271 270 heads.reverse()
272 271 last = {}
273 272 ret = []
274 273 for node in heads:
275 274 c = self.changectx(node)
276 275 rev = c.rev()
277 276 try:
278 277 fnode = c.filenode('.hgtags')
279 278 except repo.LookupError:
280 279 continue
281 280 ret.append((rev, node, fnode))
282 281 if fnode in last:
283 282 ret[last[fnode]] = None
284 283 last[fnode] = len(ret) - 1
285 284 return [item for item in ret if item]
286 285
287 286 def tagslist(self):
288 287 '''return a list of tags ordered by revision'''
289 288 l = []
290 289 for t, n in self.tags().items():
291 290 try:
292 291 r = self.changelog.rev(n)
293 292 except:
294 293 r = -2 # sort to the beginning of the list if unknown
295 294 l.append((r, t, n))
296 295 l.sort()
297 296 return [(t, n) for r, t, n in l]
298 297
299 298 def nodetags(self, node):
300 299 '''return the tags associated with a node'''
301 300 if not self.nodetagscache:
302 301 self.nodetagscache = {}
303 302 for t, n in self.tags().items():
304 303 self.nodetagscache.setdefault(n, []).append(t)
305 304 return self.nodetagscache.get(node, [])
306 305
307 306 def branchtags(self):
308 307 if self.branchcache != None:
309 308 return self.branchcache
310 309
311 310 self.branchcache = {} # avoid recursion in changectx
312 311
313 312 partial, last, lrev = self._readbranchcache()
314 313
315 314 tiprev = self.changelog.count() - 1
316 315 if lrev != tiprev:
317 316 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 318
320 319 self.branchcache = partial
321 320 return self.branchcache
322 321
323 322 def _readbranchcache(self):
324 323 partial = {}
325 324 try:
326 325 f = self.opener("branches.cache")
327 326 lines = f.read().split('\n')
328 327 f.close()
329 328 last, lrev = lines.pop(0).rstrip().split(" ", 1)
330 329 last, lrev = bin(last), int(lrev)
331 330 if (lrev < self.changelog.count() and
332 331 self.changelog.node(lrev) == last): # sanity check
333 332 for l in lines:
334 333 if not l: continue
335 334 node, label = l.rstrip().split(" ", 1)
336 335 partial[label] = bin(node)
337 336 else: # invalidate the cache
338 337 last, lrev = nullid, nullrev
339 338 except IOError:
340 339 last, lrev = nullid, nullrev
341 340 return partial, last, lrev
342 341
343 342 def _writebranchcache(self, branches, tip, tiprev):
344 343 try:
345 344 f = self.opener("branches.cache", "w")
346 345 f.write("%s %s\n" % (hex(tip), tiprev))
347 346 for label, node in branches.iteritems():
348 347 f.write("%s %s\n" % (hex(node), label))
349 348 except IOError:
350 349 pass
351 350
352 351 def _updatebranchcache(self, partial, start, end):
353 352 for r in xrange(start, end):
354 353 c = self.changectx(r)
355 354 b = c.branch()
356 355 if b:
357 356 partial[b] = c.node()
358 357
359 358 def lookup(self, key):
360 359 if key == '.':
361 360 key = self.dirstate.parents()[0]
362 361 if key == nullid:
363 362 raise repo.RepoError(_("no revision checked out"))
364 363 n = self.changelog._match(key)
365 364 if n:
366 365 return n
367 366 if key in self.tags():
368 367 return self.tags()[key]
369 368 if key in self.branchtags():
370 369 return self.branchtags()[key]
371 370 n = self.changelog._partialmatch(key)
372 371 if n:
373 372 return n
374 373 raise repo.RepoError(_("unknown revision '%s'") % key)
375 374
376 375 def dev(self):
377 376 return os.lstat(self.path).st_dev
378 377
379 378 def local(self):
380 379 return True
381 380
382 381 def join(self, f):
383 382 return os.path.join(self.path, f)
384 383
385 384 def sjoin(self, f):
386 385 return os.path.join(self.path, f)
387 386
388 387 def wjoin(self, f):
389 388 return os.path.join(self.root, f)
390 389
391 390 def file(self, f):
392 391 if f[0] == '/':
393 392 f = f[1:]
394 393 return filelog.filelog(self.sopener, f, self.revlogversion)
395 394
396 395 def changectx(self, changeid=None):
397 396 return context.changectx(self, changeid)
398 397
399 398 def workingctx(self):
400 399 return context.workingctx(self)
401 400
402 401 def parents(self, changeid=None):
403 402 '''
404 403 get list of changectxs for parents of changeid or working directory
405 404 '''
406 405 if changeid is None:
407 406 pl = self.dirstate.parents()
408 407 else:
409 408 n = self.changelog.lookup(changeid)
410 409 pl = self.changelog.parents(n)
411 410 if pl[1] == nullid:
412 411 return [self.changectx(pl[0])]
413 412 return [self.changectx(pl[0]), self.changectx(pl[1])]
414 413
415 414 def filectx(self, path, changeid=None, fileid=None):
416 415 """changeid can be a changeset revision, node, or tag.
417 416 fileid can be a file revision or node."""
418 417 return context.filectx(self, path, changeid, fileid)
419 418
420 419 def getcwd(self):
421 420 return self.dirstate.getcwd()
422 421
423 422 def wfile(self, f, mode='r'):
424 423 return self.wopener(f, mode)
425 424
426 425 def wread(self, filename):
427 426 if self.encodepats == None:
428 427 l = []
429 428 for pat, cmd in self.ui.configitems("encode"):
430 429 mf = util.matcher(self.root, "", [pat], [], [])[1]
431 430 l.append((mf, cmd))
432 431 self.encodepats = l
433 432
434 433 data = self.wopener(filename, 'r').read()
435 434
436 435 for mf, cmd in self.encodepats:
437 436 if mf(filename):
438 437 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
439 438 data = util.filter(data, cmd)
440 439 break
441 440
442 441 return data
443 442
444 443 def wwrite(self, filename, data, fd=None):
445 444 if self.decodepats == None:
446 445 l = []
447 446 for pat, cmd in self.ui.configitems("decode"):
448 447 mf = util.matcher(self.root, "", [pat], [], [])[1]
449 448 l.append((mf, cmd))
450 449 self.decodepats = l
451 450
452 451 for mf, cmd in self.decodepats:
453 452 if mf(filename):
454 453 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
455 454 data = util.filter(data, cmd)
456 455 break
457 456
458 457 if fd:
459 458 return fd.write(data)
460 459 return self.wopener(filename, 'w').write(data)
461 460
462 461 def transaction(self):
463 462 tr = self.transhandle
464 463 if tr != None and tr.running():
465 464 return tr.nest()
466 465
467 466 # save dirstate for rollback
468 467 try:
469 468 ds = self.opener("dirstate").read()
470 469 except IOError:
471 470 ds = ""
472 471 self.opener("journal.dirstate", "w").write(ds)
473 472
474 473 tr = transaction.transaction(self.ui.warn, self.sopener,
475 474 self.sjoin("journal"),
476 475 aftertrans(self.path))
477 476 self.transhandle = tr
478 477 return tr
479 478
480 479 def recover(self):
481 480 l = self.lock()
482 481 if os.path.exists(self.sjoin("journal")):
483 482 self.ui.status(_("rolling back interrupted transaction\n"))
484 483 transaction.rollback(self.sopener, self.sjoin("journal"))
485 484 self.reload()
486 485 return True
487 486 else:
488 487 self.ui.warn(_("no interrupted transaction available\n"))
489 488 return False
490 489
491 490 def rollback(self, wlock=None):
492 491 if not wlock:
493 492 wlock = self.wlock()
494 493 l = self.lock()
495 494 if os.path.exists(self.sjoin("undo")):
496 495 self.ui.status(_("rolling back last transaction\n"))
497 496 transaction.rollback(self.sopener, self.sjoin("undo"))
498 497 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
499 498 self.reload()
500 499 self.wreload()
501 500 else:
502 501 self.ui.warn(_("no rollback information available\n"))
503 502
504 503 def wreload(self):
505 504 self.dirstate.read()
506 505
507 506 def reload(self):
508 507 self.changelog.load()
509 508 self.manifest.load()
510 509 self.tagscache = None
511 510 self.nodetagscache = None
512 511
513 512 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
514 513 desc=None):
515 514 try:
516 515 l = lock.lock(lockname, 0, releasefn, desc=desc)
517 516 except lock.LockHeld, inst:
518 517 if not wait:
519 518 raise
520 519 self.ui.warn(_("waiting for lock on %s held by %r\n") %
521 520 (desc, inst.locker))
522 521 # default to 600 seconds timeout
523 522 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
524 523 releasefn, desc=desc)
525 524 if acquirefn:
526 525 acquirefn()
527 526 return l
528 527
529 528 def lock(self, wait=1):
530 529 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
531 530 desc=_('repository %s') % self.origroot)
532 531
533 532 def wlock(self, wait=1):
534 533 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
535 534 self.wreload,
536 535 desc=_('working directory of %s') % self.origroot)
537 536
538 537 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
539 538 """
540 539 commit an individual file as part of a larger transaction
541 540 """
542 541
543 542 t = self.wread(fn)
544 543 fl = self.file(fn)
545 544 fp1 = manifest1.get(fn, nullid)
546 545 fp2 = manifest2.get(fn, nullid)
547 546
548 547 meta = {}
549 548 cp = self.dirstate.copied(fn)
550 549 if cp:
551 550 meta["copy"] = cp
552 551 if not manifest2: # not a branch merge
553 552 meta["copyrev"] = hex(manifest1.get(cp, nullid))
554 553 fp2 = nullid
555 554 elif fp2 != nullid: # copied on remote side
556 555 meta["copyrev"] = hex(manifest1.get(cp, nullid))
557 556 else: # copied on local side, reversed
558 557 meta["copyrev"] = hex(manifest2.get(cp))
559 558 fp2 = nullid
560 559 self.ui.debug(_(" %s: copy %s:%s\n") %
561 560 (fn, cp, meta["copyrev"]))
562 561 fp1 = nullid
563 562 elif fp2 != nullid:
564 563 # is one parent an ancestor of the other?
565 564 fpa = fl.ancestor(fp1, fp2)
566 565 if fpa == fp1:
567 566 fp1, fp2 = fp2, nullid
568 567 elif fpa == fp2:
569 568 fp2 = nullid
570 569
571 570 # is the file unmodified from the parent? report existing entry
572 571 if fp2 == nullid and not fl.cmp(fp1, t):
573 572 return fp1
574 573
575 574 changelist.append(fn)
576 575 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
577 576
578 577 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
579 578 if p1 is None:
580 579 p1, p2 = self.dirstate.parents()
581 580 return self.commit(files=files, text=text, user=user, date=date,
582 581 p1=p1, p2=p2, wlock=wlock)
583 582
584 583 def commit(self, files=None, text="", user=None, date=None,
585 584 match=util.always, force=False, lock=None, wlock=None,
586 585 force_editor=False, p1=None, p2=None, extra={}):
587 586
588 587 commit = []
589 588 remove = []
590 589 changed = []
591 590 use_dirstate = (p1 is None) # not rawcommit
592 591 extra = extra.copy()
593 592
594 593 if use_dirstate:
595 594 if files:
596 595 for f in files:
597 596 s = self.dirstate.state(f)
598 597 if s in 'nmai':
599 598 commit.append(f)
600 599 elif s == 'r':
601 600 remove.append(f)
602 601 else:
603 602 self.ui.warn(_("%s not tracked!\n") % f)
604 603 else:
605 604 changes = self.status(match=match)[:5]
606 605 modified, added, removed, deleted, unknown = changes
607 606 commit = modified + added
608 607 remove = removed
609 608 else:
610 609 commit = files
611 610
612 611 if use_dirstate:
613 612 p1, p2 = self.dirstate.parents()
614 613 update_dirstate = True
615 614 else:
616 615 p1, p2 = p1, p2 or nullid
617 616 update_dirstate = (self.dirstate.parents()[0] == p1)
618 617
619 618 c1 = self.changelog.read(p1)
620 619 c2 = self.changelog.read(p2)
621 620 m1 = self.manifest.read(c1[0]).copy()
622 621 m2 = self.manifest.read(c2[0])
623 622
624 623 if use_dirstate:
625 624 branchname = self.workingctx().branch()
626 625 else:
627 626 branchname = ""
628 627
629 628 if use_dirstate:
630 629 oldname = c1[5].get("branch", "")
631 630 if not commit and not remove and not force and p2 == nullid and \
632 631 branchname == oldname:
633 632 self.ui.status(_("nothing changed\n"))
634 633 return None
635 634
636 635 xp1 = hex(p1)
637 636 if p2 == nullid: xp2 = ''
638 637 else: xp2 = hex(p2)
639 638
640 639 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
641 640
642 641 if not wlock:
643 642 wlock = self.wlock()
644 643 if not lock:
645 644 lock = self.lock()
646 645 tr = self.transaction()
647 646
648 647 # check in files
649 648 new = {}
650 649 linkrev = self.changelog.count()
651 650 commit.sort()
652 651 for f in commit:
653 652 self.ui.note(f + "\n")
654 653 try:
655 654 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
656 655 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
657 656 except IOError:
658 657 if use_dirstate:
659 658 self.ui.warn(_("trouble committing %s!\n") % f)
660 659 raise
661 660 else:
662 661 remove.append(f)
663 662
664 663 # update manifest
665 664 m1.update(new)
666 665 remove.sort()
667 666
668 667 for f in remove:
669 668 if f in m1:
670 669 del m1[f]
671 670 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
672 671
673 672 # add changeset
674 673 new = new.keys()
675 674 new.sort()
676 675
677 676 user = user or self.ui.username()
678 677 if not text or force_editor:
679 678 edittext = []
680 679 if text:
681 680 edittext.append(text)
682 681 edittext.append("")
683 682 if p2 != nullid:
684 683 edittext.append("HG: branch merge")
685 684 edittext.extend(["HG: changed %s" % f for f in changed])
686 685 edittext.extend(["HG: removed %s" % f for f in remove])
687 686 if not changed and not remove:
688 687 edittext.append("HG: no files changed")
689 688 edittext.append("")
690 689 # run editor in the repository root
691 690 olddir = os.getcwd()
692 691 os.chdir(self.root)
693 692 text = self.ui.edit("\n".join(edittext), user)
694 693 os.chdir(olddir)
695 694
696 695 lines = [line.rstrip() for line in text.rstrip().splitlines()]
697 696 while lines and not lines[0]:
698 697 del lines[0]
699 698 if not lines:
700 699 return None
701 700 text = '\n'.join(lines)
702 701 if branchname:
703 702 extra["branch"] = branchname
704 703 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
705 704 user, date, extra)
706 705 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
707 706 parent2=xp2)
708 707 tr.close()
709 708
710 709 if use_dirstate or update_dirstate:
711 710 self.dirstate.setparents(n)
712 711 if use_dirstate:
713 712 self.dirstate.update(new, "n")
714 713 self.dirstate.forget(remove)
715 714
716 715 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
717 716 return n
718 717
719 718 def walk(self, node=None, files=[], match=util.always, badmatch=None):
720 719 '''
721 720 walk recursively through the directory tree or a given
722 721 changeset, finding all files matched by the match
723 722 function
724 723
725 724 results are yielded in a tuple (src, filename), where src
726 725 is one of:
727 726 'f' the file was found in the directory tree
728 727 'm' the file was only in the dirstate and not in the tree
729 728 'b' file was not found and matched badmatch
730 729 '''
731 730
732 731 if node:
733 732 fdict = dict.fromkeys(files)
734 733 for fn in self.manifest.read(self.changelog.read(node)[0]):
735 734 for ffn in fdict:
736 735 # match if the file is the exact name or a directory
737 736 if ffn == fn or fn.startswith("%s/" % ffn):
738 737 del fdict[ffn]
739 738 break
740 739 if match(fn):
741 740 yield 'm', fn
742 741 for fn in fdict:
743 742 if badmatch and badmatch(fn):
744 743 if match(fn):
745 744 yield 'b', fn
746 745 else:
747 746 self.ui.warn(_('%s: No such file in rev %s\n') % (
748 747 util.pathto(self.getcwd(), fn), short(node)))
749 748 else:
750 749 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
751 750 yield src, fn
752 751
753 752 def status(self, node1=None, node2=None, files=[], match=util.always,
754 753 wlock=None, list_ignored=False, list_clean=False):
755 754 """return status of files between two nodes or node and working directory
756 755
757 756 If node1 is None, use the first dirstate parent instead.
758 757 If node2 is None, compare node1 with working directory.
759 758 """
760 759
761 760 def fcmp(fn, mf):
762 761 t1 = self.wread(fn)
763 762 return self.file(fn).cmp(mf.get(fn, nullid), t1)
764 763
765 764 def mfmatches(node):
766 765 change = self.changelog.read(node)
767 766 mf = self.manifest.read(change[0]).copy()
768 767 for fn in mf.keys():
769 768 if not match(fn):
770 769 del mf[fn]
771 770 return mf
772 771
773 772 modified, added, removed, deleted, unknown = [], [], [], [], []
774 773 ignored, clean = [], []
775 774
776 775 compareworking = False
777 776 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
778 777 compareworking = True
779 778
780 779 if not compareworking:
781 780 # read the manifest from node1 before the manifest from node2,
782 781 # so that we'll hit the manifest cache if we're going through
783 782 # all the revisions in parent->child order.
784 783 mf1 = mfmatches(node1)
785 784
786 785 # are we comparing the working directory?
787 786 if not node2:
788 787 if not wlock:
789 788 try:
790 789 wlock = self.wlock(wait=0)
791 790 except lock.LockException:
792 791 wlock = None
793 792 (lookup, modified, added, removed, deleted, unknown,
794 793 ignored, clean) = self.dirstate.status(files, match,
795 794 list_ignored, list_clean)
796 795
797 796 # are we comparing working dir against its parent?
798 797 if compareworking:
799 798 if lookup:
800 799 # do a full compare of any files that might have changed
801 800 mf2 = mfmatches(self.dirstate.parents()[0])
802 801 for f in lookup:
803 802 if fcmp(f, mf2):
804 803 modified.append(f)
805 804 else:
806 805 clean.append(f)
807 806 if wlock is not None:
808 807 self.dirstate.update([f], "n")
809 808 else:
810 809 # we are comparing working dir against non-parent
811 810 # generate a pseudo-manifest for the working dir
812 811 # XXX: create it in dirstate.py ?
813 812 mf2 = mfmatches(self.dirstate.parents()[0])
814 813 for f in lookup + modified + added:
815 814 mf2[f] = ""
816 815 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
817 816 for f in removed:
818 817 if f in mf2:
819 818 del mf2[f]
820 819 else:
821 820 # we are comparing two revisions
822 821 mf2 = mfmatches(node2)
823 822
824 823 if not compareworking:
825 824 # flush lists from dirstate before comparing manifests
826 825 modified, added, clean = [], [], []
827 826
828 827 # make sure to sort the files so we talk to the disk in a
829 828 # reasonable order
830 829 mf2keys = mf2.keys()
831 830 mf2keys.sort()
832 831 for fn in mf2keys:
833 832 if mf1.has_key(fn):
834 833 if mf1.flags(fn) != mf2.flags(fn) or \
835 834 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
836 835 modified.append(fn)
837 836 elif list_clean:
838 837 clean.append(fn)
839 838 del mf1[fn]
840 839 else:
841 840 added.append(fn)
842 841
843 842 removed = mf1.keys()
844 843
845 844 # sort and return results:
846 845 for l in modified, added, removed, deleted, unknown, ignored, clean:
847 846 l.sort()
848 847 return (modified, added, removed, deleted, unknown, ignored, clean)
849 848
850 849 def add(self, list, wlock=None):
851 850 if not wlock:
852 851 wlock = self.wlock()
853 852 for f in list:
854 853 p = self.wjoin(f)
855 854 if not os.path.exists(p):
856 855 self.ui.warn(_("%s does not exist!\n") % f)
857 856 elif not os.path.isfile(p):
858 857 self.ui.warn(_("%s not added: only files supported currently\n")
859 858 % f)
860 859 elif self.dirstate.state(f) in 'an':
861 860 self.ui.warn(_("%s already tracked!\n") % f)
862 861 else:
863 862 self.dirstate.update([f], "a")
864 863
865 864 def forget(self, list, wlock=None):
866 865 if not wlock:
867 866 wlock = self.wlock()
868 867 for f in list:
869 868 if self.dirstate.state(f) not in 'ai':
870 869 self.ui.warn(_("%s not added!\n") % f)
871 870 else:
872 871 self.dirstate.forget([f])
873 872
874 873 def remove(self, list, unlink=False, wlock=None):
875 874 if unlink:
876 875 for f in list:
877 876 try:
878 877 util.unlink(self.wjoin(f))
879 878 except OSError, inst:
880 879 if inst.errno != errno.ENOENT:
881 880 raise
882 881 if not wlock:
883 882 wlock = self.wlock()
884 883 for f in list:
885 884 p = self.wjoin(f)
886 885 if os.path.exists(p):
887 886 self.ui.warn(_("%s still exists!\n") % f)
888 887 elif self.dirstate.state(f) == 'a':
889 888 self.dirstate.forget([f])
890 889 elif f not in self.dirstate:
891 890 self.ui.warn(_("%s not tracked!\n") % f)
892 891 else:
893 892 self.dirstate.update([f], "r")
894 893
895 894 def undelete(self, list, wlock=None):
896 895 p = self.dirstate.parents()[0]
897 896 mn = self.changelog.read(p)[0]
898 897 m = self.manifest.read(mn)
899 898 if not wlock:
900 899 wlock = self.wlock()
901 900 for f in list:
902 901 if self.dirstate.state(f) not in "r":
903 902 self.ui.warn("%s not removed!\n" % f)
904 903 else:
905 904 t = self.file(f).read(m[f])
906 905 self.wwrite(f, t)
907 906 util.set_exec(self.wjoin(f), m.execf(f))
908 907 self.dirstate.update([f], "n")
909 908
910 909 def copy(self, source, dest, wlock=None):
911 910 p = self.wjoin(dest)
912 911 if not os.path.exists(p):
913 912 self.ui.warn(_("%s does not exist!\n") % dest)
914 913 elif not os.path.isfile(p):
915 914 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
916 915 else:
917 916 if not wlock:
918 917 wlock = self.wlock()
919 918 if self.dirstate.state(dest) == '?':
920 919 self.dirstate.update([dest], "a")
921 920 self.dirstate.copy(source, dest)
922 921
923 922 def heads(self, start=None):
924 923 heads = self.changelog.heads(start)
925 924 # sort the output in rev descending order
926 925 heads = [(-self.changelog.rev(h), h) for h in heads]
927 926 heads.sort()
928 927 return [n for (r, n) in heads]
929 928
930 929 # branchlookup returns a dict giving a list of branches for
931 930 # each head. A branch is defined as the tag of a node or
932 931 # the branch of the node's parents. If a node has multiple
933 932 # branch tags, tags are eliminated if they are visible from other
934 933 # branch tags.
935 934 #
936 935 # So, for this graph: a->b->c->d->e
937 936 # \ /
938 937 # aa -----/
939 938 # a has tag 2.6.12
940 939 # d has tag 2.6.13
941 940 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
942 941 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
943 942 # from the list.
944 943 #
945 944 # It is possible that more than one head will have the same branch tag.
946 945 # callers need to check the result for multiple heads under the same
947 946 # branch tag if that is a problem for them (ie checkout of a specific
948 947 # branch).
949 948 #
950 949 # passing in a specific branch will limit the depth of the search
951 950 # through the parents. It won't limit the branches returned in the
952 951 # result though.
953 952 def branchlookup(self, heads=None, branch=None):
954 953 if not heads:
955 954 heads = self.heads()
956 955 headt = [ h for h in heads ]
957 956 chlog = self.changelog
958 957 branches = {}
959 958 merges = []
960 959 seenmerge = {}
961 960
962 961 # traverse the tree once for each head, recording in the branches
963 962 # dict which tags are visible from this head. The branches
964 963 # dict also records which tags are visible from each tag
965 964 # while we traverse.
966 965 while headt or merges:
967 966 if merges:
968 967 n, found = merges.pop()
969 968 visit = [n]
970 969 else:
971 970 h = headt.pop()
972 971 visit = [h]
973 972 found = [h]
974 973 seen = {}
975 974 while visit:
976 975 n = visit.pop()
977 976 if n in seen:
978 977 continue
979 978 pp = chlog.parents(n)
980 979 tags = self.nodetags(n)
981 980 if tags:
982 981 for x in tags:
983 982 if x == 'tip':
984 983 continue
985 984 for f in found:
986 985 branches.setdefault(f, {})[n] = 1
987 986 branches.setdefault(n, {})[n] = 1
988 987 break
989 988 if n not in found:
990 989 found.append(n)
991 990 if branch in tags:
992 991 continue
993 992 seen[n] = 1
994 993 if pp[1] != nullid and n not in seenmerge:
995 994 merges.append((pp[1], [x for x in found]))
996 995 seenmerge[n] = 1
997 996 if pp[0] != nullid:
998 997 visit.append(pp[0])
999 998 # traverse the branches dict, eliminating branch tags from each
1000 999 # head that are visible from another branch tag for that head.
1001 1000 out = {}
1002 1001 viscache = {}
1003 1002 for h in heads:
1004 1003 def visible(node):
1005 1004 if node in viscache:
1006 1005 return viscache[node]
1007 1006 ret = {}
1008 1007 visit = [node]
1009 1008 while visit:
1010 1009 x = visit.pop()
1011 1010 if x in viscache:
1012 1011 ret.update(viscache[x])
1013 1012 elif x not in ret:
1014 1013 ret[x] = 1
1015 1014 if x in branches:
1016 1015 visit[len(visit):] = branches[x].keys()
1017 1016 viscache[node] = ret
1018 1017 return ret
1019 1018 if h not in branches:
1020 1019 continue
1021 1020 # O(n^2), but somewhat limited. This only searches the
1022 1021 # tags visible from a specific head, not all the tags in the
1023 1022 # whole repo.
1024 1023 for b in branches[h]:
1025 1024 vis = False
1026 1025 for bb in branches[h].keys():
1027 1026 if b != bb:
1028 1027 if b in visible(bb):
1029 1028 vis = True
1030 1029 break
1031 1030 if not vis:
1032 1031 l = out.setdefault(h, [])
1033 1032 l[len(l):] = self.nodetags(b)
1034 1033 return out
1035 1034
1036 1035 def branches(self, nodes):
1037 1036 if not nodes:
1038 1037 nodes = [self.changelog.tip()]
1039 1038 b = []
1040 1039 for n in nodes:
1041 1040 t = n
1042 1041 while 1:
1043 1042 p = self.changelog.parents(n)
1044 1043 if p[1] != nullid or p[0] == nullid:
1045 1044 b.append((t, n, p[0], p[1]))
1046 1045 break
1047 1046 n = p[0]
1048 1047 return b
1049 1048
1050 1049 def between(self, pairs):
1051 1050 r = []
1052 1051
1053 1052 for top, bottom in pairs:
1054 1053 n, l, i = top, [], 0
1055 1054 f = 1
1056 1055
1057 1056 while n != bottom:
1058 1057 p = self.changelog.parents(n)[0]
1059 1058 if i == f:
1060 1059 l.append(n)
1061 1060 f = f * 2
1062 1061 n = p
1063 1062 i += 1
1064 1063
1065 1064 r.append(l)
1066 1065
1067 1066 return r
1068 1067
1069 1068 def findincoming(self, remote, base=None, heads=None, force=False):
1070 1069 """Return list of roots of the subsets of missing nodes from remote
1071 1070
1072 1071 If base dict is specified, assume that these nodes and their parents
1073 1072 exist on the remote side and that no child of a node of base exists
1074 1073 in both remote and self.
1075 1074 Furthermore, base will be updated to include the nodes that exist
1076 1075 in self and remote but none of whose children exist in both.
1077 1076 If a list of heads is specified, return only nodes which are heads
1078 1077 or ancestors of these heads.
1079 1078
1080 1079 All the ancestors of base are in self and in remote.
1081 1080 All the descendants of the list returned are missing in self.
1082 1081 (and so we know that the rest of the nodes are missing in remote, see
1083 1082 outgoing)
1084 1083 """
1085 1084 m = self.changelog.nodemap
1086 1085 search = []
1087 1086 fetch = {}
1088 1087 seen = {}
1089 1088 seenbranch = {}
1090 1089 if base == None:
1091 1090 base = {}
1092 1091
1093 1092 if not heads:
1094 1093 heads = remote.heads()
1095 1094
1096 1095 if self.changelog.tip() == nullid:
1097 1096 base[nullid] = 1
1098 1097 if heads != [nullid]:
1099 1098 return [nullid]
1100 1099 return []
1101 1100
1102 1101 # assume we're closer to the tip than the root
1103 1102 # and start by examining the heads
1104 1103 self.ui.status(_("searching for changes\n"))
1105 1104
1106 1105 unknown = []
1107 1106 for h in heads:
1108 1107 if h not in m:
1109 1108 unknown.append(h)
1110 1109 else:
1111 1110 base[h] = 1
1112 1111
1113 1112 if not unknown:
1114 1113 return []
1115 1114
1116 1115 req = dict.fromkeys(unknown)
1117 1116 reqcnt = 0
1118 1117
1119 1118 # search through remote branches
1120 1119 # a 'branch' here is a linear segment of history, with four parts:
1121 1120 # head, root, first parent, second parent
1122 1121 # (a branch always has two parents (or none) by definition)
1123 1122 unknown = remote.branches(unknown)
1124 1123 while unknown:
1125 1124 r = []
1126 1125 while unknown:
1127 1126 n = unknown.pop(0)
1128 1127 if n[0] in seen:
1129 1128 continue
1130 1129
1131 1130 self.ui.debug(_("examining %s:%s\n")
1132 1131 % (short(n[0]), short(n[1])))
1133 1132 if n[0] == nullid: # found the end of the branch
1134 1133 pass
1135 1134 elif n in seenbranch:
1136 1135 self.ui.debug(_("branch already found\n"))
1137 1136 continue
1138 1137 elif n[1] and n[1] in m: # do we know the base?
1139 1138 self.ui.debug(_("found incomplete branch %s:%s\n")
1140 1139 % (short(n[0]), short(n[1])))
1141 1140 search.append(n) # schedule branch range for scanning
1142 1141 seenbranch[n] = 1
1143 1142 else:
1144 1143 if n[1] not in seen and n[1] not in fetch:
1145 1144 if n[2] in m and n[3] in m:
1146 1145 self.ui.debug(_("found new changeset %s\n") %
1147 1146 short(n[1]))
1148 1147 fetch[n[1]] = 1 # earliest unknown
1149 1148 for p in n[2:4]:
1150 1149 if p in m:
1151 1150 base[p] = 1 # latest known
1152 1151
1153 1152 for p in n[2:4]:
1154 1153 if p not in req and p not in m:
1155 1154 r.append(p)
1156 1155 req[p] = 1
1157 1156 seen[n[0]] = 1
1158 1157
1159 1158 if r:
1160 1159 reqcnt += 1
1161 1160 self.ui.debug(_("request %d: %s\n") %
1162 1161 (reqcnt, " ".join(map(short, r))))
1163 1162 for p in xrange(0, len(r), 10):
1164 1163 for b in remote.branches(r[p:p+10]):
1165 1164 self.ui.debug(_("received %s:%s\n") %
1166 1165 (short(b[0]), short(b[1])))
1167 1166 unknown.append(b)
1168 1167
1169 1168 # do binary search on the branches we found
1170 1169 while search:
1171 1170 n = search.pop(0)
1172 1171 reqcnt += 1
1173 1172 l = remote.between([(n[0], n[1])])[0]
1174 1173 l.append(n[1])
1175 1174 p = n[0]
1176 1175 f = 1
1177 1176 for i in l:
1178 1177 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1179 1178 if i in m:
1180 1179 if f <= 2:
1181 1180 self.ui.debug(_("found new branch changeset %s\n") %
1182 1181 short(p))
1183 1182 fetch[p] = 1
1184 1183 base[i] = 1
1185 1184 else:
1186 1185 self.ui.debug(_("narrowed branch search to %s:%s\n")
1187 1186 % (short(p), short(i)))
1188 1187 search.append((p, i))
1189 1188 break
1190 1189 p, f = i, f * 2
1191 1190
1192 1191 # sanity check our fetch list
1193 1192 for f in fetch.keys():
1194 1193 if f in m:
1195 1194 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1196 1195
1197 1196 if base.keys() == [nullid]:
1198 1197 if force:
1199 1198 self.ui.warn(_("warning: repository is unrelated\n"))
1200 1199 else:
1201 1200 raise util.Abort(_("repository is unrelated"))
1202 1201
1203 1202 self.ui.debug(_("found new changesets starting at ") +
1204 1203 " ".join([short(f) for f in fetch]) + "\n")
1205 1204
1206 1205 self.ui.debug(_("%d total queries\n") % reqcnt)
1207 1206
1208 1207 return fetch.keys()
1209 1208
1210 1209 def findoutgoing(self, remote, base=None, heads=None, force=False):
1211 1210 """Return list of nodes that are roots of subsets not in remote
1212 1211
1213 1212 If base dict is specified, assume that these nodes and their parents
1214 1213 exist on the remote side.
1215 1214 If a list of heads is specified, return only nodes which are heads
1216 1215 or ancestors of these heads, and return a second element which
1217 1216 contains all remote heads which get new children.
1218 1217 """
1219 1218 if base == None:
1220 1219 base = {}
1221 1220 self.findincoming(remote, base, heads, force=force)
1222 1221
1223 1222 self.ui.debug(_("common changesets up to ")
1224 1223 + " ".join(map(short, base.keys())) + "\n")
1225 1224
1226 1225 remain = dict.fromkeys(self.changelog.nodemap)
1227 1226
1228 1227 # prune everything remote has from the tree
1229 1228 del remain[nullid]
1230 1229 remove = base.keys()
1231 1230 while remove:
1232 1231 n = remove.pop(0)
1233 1232 if n in remain:
1234 1233 del remain[n]
1235 1234 for p in self.changelog.parents(n):
1236 1235 remove.append(p)
1237 1236
1238 1237 # find every node whose parents have been pruned
1239 1238 subset = []
1240 1239 # find every remote head that will get new children
1241 1240 updated_heads = {}
1242 1241 for n in remain:
1243 1242 p1, p2 = self.changelog.parents(n)
1244 1243 if p1 not in remain and p2 not in remain:
1245 1244 subset.append(n)
1246 1245 if heads:
1247 1246 if p1 in heads:
1248 1247 updated_heads[p1] = True
1249 1248 if p2 in heads:
1250 1249 updated_heads[p2] = True
1251 1250
1252 1251 # this is the set of all roots we have to push
1253 1252 if heads:
1254 1253 return subset, updated_heads.keys()
1255 1254 else:
1256 1255 return subset
1257 1256
1258 1257 def pull(self, remote, heads=None, force=False, lock=None):
1259 1258 mylock = False
1260 1259 if not lock:
1261 1260 lock = self.lock()
1262 1261 mylock = True
1263 1262
1264 1263 try:
1265 1264 fetch = self.findincoming(remote, force=force)
1266 1265 if fetch == [nullid]:
1267 1266 self.ui.status(_("requesting all changes\n"))
1268 1267
1269 1268 if not fetch:
1270 1269 self.ui.status(_("no changes found\n"))
1271 1270 return 0
1272 1271
1273 1272 if heads is None:
1274 1273 cg = remote.changegroup(fetch, 'pull')
1275 1274 else:
1276 1275 if 'changegroupsubset' not in remote.capabilities:
1277 1276 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1278 1277 cg = remote.changegroupsubset(fetch, heads, 'pull')
1279 1278 return self.addchangegroup(cg, 'pull', remote.url())
1280 1279 finally:
1281 1280 if mylock:
1282 1281 lock.release()
1283 1282
1284 1283 def push(self, remote, force=False, revs=None):
1285 1284 # there are two ways to push to remote repo:
1286 1285 #
1287 1286 # addchangegroup assumes local user can lock remote
1288 1287 # repo (local filesystem, old ssh servers).
1289 1288 #
1290 1289 # unbundle assumes local user cannot lock remote repo (new ssh
1291 1290 # servers, http servers).
1292 1291
1293 1292 if remote.capable('unbundle'):
1294 1293 return self.push_unbundle(remote, force, revs)
1295 1294 return self.push_addchangegroup(remote, force, revs)
1296 1295
1297 1296 def prepush(self, remote, force, revs):
1298 1297 base = {}
1299 1298 remote_heads = remote.heads()
1300 1299 inc = self.findincoming(remote, base, remote_heads, force=force)
1301 1300
1302 1301 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1303 1302 if revs is not None:
1304 1303 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1305 1304 else:
1306 1305 bases, heads = update, self.changelog.heads()
1307 1306
1308 1307 if not bases:
1309 1308 self.ui.status(_("no changes found\n"))
1310 1309 return None, 1
1311 1310 elif not force:
1312 1311 # check if we're creating new remote heads
1313 1312 # to be a remote head after push, node must be either
1314 1313 # - unknown locally
1315 1314 # - a local outgoing head descended from update
1316 1315 # - a remote head that's known locally and not
1317 1316 # ancestral to an outgoing head
1318 1317
1319 1318 warn = 0
1320 1319
1321 1320 if remote_heads == [nullid]:
1322 1321 warn = 0
1323 1322 elif not revs and len(heads) > len(remote_heads):
1324 1323 warn = 1
1325 1324 else:
1326 1325 newheads = list(heads)
1327 1326 for r in remote_heads:
1328 1327 if r in self.changelog.nodemap:
1329 1328 desc = self.changelog.heads(r)
1330 1329 l = [h for h in heads if h in desc]
1331 1330 if not l:
1332 1331 newheads.append(r)
1333 1332 else:
1334 1333 newheads.append(r)
1335 1334 if len(newheads) > len(remote_heads):
1336 1335 warn = 1
1337 1336
1338 1337 if warn:
1339 1338 self.ui.warn(_("abort: push creates new remote branches!\n"))
1340 1339 self.ui.status(_("(did you forget to merge?"
1341 1340 " use push -f to force)\n"))
1342 1341 return None, 1
1343 1342 elif inc:
1344 1343 self.ui.warn(_("note: unsynced remote changes!\n"))
1345 1344
1346 1345
1347 1346 if revs is None:
1348 1347 cg = self.changegroup(update, 'push')
1349 1348 else:
1350 1349 cg = self.changegroupsubset(update, revs, 'push')
1351 1350 return cg, remote_heads
1352 1351
1353 1352 def push_addchangegroup(self, remote, force, revs):
1354 1353 lock = remote.lock()
1355 1354
1356 1355 ret = self.prepush(remote, force, revs)
1357 1356 if ret[0] is not None:
1358 1357 cg, remote_heads = ret
1359 1358 return remote.addchangegroup(cg, 'push', self.url())
1360 1359 return ret[1]
1361 1360
1362 1361 def push_unbundle(self, remote, force, revs):
1363 1362 # local repo finds heads on server, finds out what revs it
1364 1363 # must push. once revs transferred, if server finds it has
1365 1364 # different heads (someone else won commit/push race), server
1366 1365 # aborts.
1367 1366
1368 1367 ret = self.prepush(remote, force, revs)
1369 1368 if ret[0] is not None:
1370 1369 cg, remote_heads = ret
1371 1370 if force: remote_heads = ['force']
1372 1371 return remote.unbundle(cg, remote_heads, 'push')
1373 1372 return ret[1]
1374 1373
1375 1374 def changegroupinfo(self, nodes):
1376 1375 self.ui.note(_("%d changesets found\n") % len(nodes))
1377 1376 if self.ui.debugflag:
1378 1377 self.ui.debug(_("List of changesets:\n"))
1379 1378 for node in nodes:
1380 1379 self.ui.debug("%s\n" % hex(node))
1381 1380
1382 1381 def changegroupsubset(self, bases, heads, source):
1383 1382 """This function generates a changegroup consisting of all the nodes
1384 1383 that are descendants of any of the bases, and ancestors of any of
1385 1384 the heads.
1386 1385
1387 1386 It is fairly complex as determining which filenodes and which
1388 1387 manifest nodes need to be included for the changeset to be complete
1389 1388 is non-trivial.
1390 1389
1391 1390 Another wrinkle is doing the reverse, figuring out which changeset in
1392 1391 the changegroup a particular filenode or manifestnode belongs to."""
1393 1392
1394 1393 self.hook('preoutgoing', throw=True, source=source)
1395 1394
1396 1395 # Set up some initial variables
1397 1396 # Make it easy to refer to self.changelog
1398 1397 cl = self.changelog
1399 1398 # msng is short for missing - compute the list of changesets in this
1400 1399 # changegroup.
1401 1400 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1402 1401 self.changegroupinfo(msng_cl_lst)
1403 1402 # Some bases may turn out to be superfluous, and some heads may be
1404 1403 # too. nodesbetween will return the minimal set of bases and heads
1405 1404 # necessary to re-create the changegroup.
1406 1405
1407 1406 # Known heads are the list of heads that it is assumed the recipient
1408 1407 # of this changegroup will know about.
1409 1408 knownheads = {}
1410 1409 # We assume that all parents of bases are known heads.
1411 1410 for n in bases:
1412 1411 for p in cl.parents(n):
1413 1412 if p != nullid:
1414 1413 knownheads[p] = 1
1415 1414 knownheads = knownheads.keys()
1416 1415 if knownheads:
1417 1416 # Now that we know what heads are known, we can compute which
1418 1417 # changesets are known. The recipient must know about all
1419 1418 # changesets required to reach the known heads from the null
1420 1419 # changeset.
1421 1420 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1422 1421 junk = None
1423 1422 # Transform the list into an ersatz set.
1424 1423 has_cl_set = dict.fromkeys(has_cl_set)
1425 1424 else:
1426 1425 # If there were no known heads, the recipient cannot be assumed to
1427 1426 # know about any changesets.
1428 1427 has_cl_set = {}
1429 1428
1430 1429 # Make it easy to refer to self.manifest
1431 1430 mnfst = self.manifest
1432 1431 # We don't know which manifests are missing yet
1433 1432 msng_mnfst_set = {}
1434 1433 # Nor do we know which filenodes are missing.
1435 1434 msng_filenode_set = {}
1436 1435
1437 1436 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1438 1437 junk = None
1439 1438
1440 1439 # A changeset always belongs to itself, so the changenode lookup
1441 1440 # function for a changenode is identity.
1442 1441 def identity(x):
1443 1442 return x
1444 1443
1445 1444 # A function generating function. Sets up an environment for the
1446 1445 # inner function.
1447 1446 def cmp_by_rev_func(revlog):
1448 1447 # Compare two nodes by their revision number in the environment's
1449 1448 # revision history. Since the revision number both represents the
1450 1449 # most efficient order to read the nodes in, and represents a
1451 1450 # topological sorting of the nodes, this function is often useful.
1452 1451 def cmp_by_rev(a, b):
1453 1452 return cmp(revlog.rev(a), revlog.rev(b))
1454 1453 return cmp_by_rev
1455 1454
1456 1455 # If we determine that a particular file or manifest node must be a
1457 1456 # node that the recipient of the changegroup will already have, we can
1458 1457 # also assume the recipient will have all the parents. This function
1459 1458 # prunes them from the set of missing nodes.
1460 1459 def prune_parents(revlog, hasset, msngset):
1461 1460 haslst = hasset.keys()
1462 1461 haslst.sort(cmp_by_rev_func(revlog))
1463 1462 for node in haslst:
1464 1463 parentlst = [p for p in revlog.parents(node) if p != nullid]
1465 1464 while parentlst:
1466 1465 n = parentlst.pop()
1467 1466 if n not in hasset:
1468 1467 hasset[n] = 1
1469 1468 p = [p for p in revlog.parents(n) if p != nullid]
1470 1469 parentlst.extend(p)
1471 1470 for n in hasset:
1472 1471 msngset.pop(n, None)
1473 1472
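# Tiny illustrative trace of prune_parents on hypothetical data: with a
# linear history A -> B -> C, if the recipient is known to have C, then
# A and B are pruned from the missing set as well:
#   hasset  = {C: 1}         grows to   {A: 1, B: 1, C: 1}
#   msngset = {A: x, B: y}   ends up    {}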
1474 1473 # This is a function generating function used to set up an environment
1475 1474 # for the inner function to execute in.
1476 1475 def manifest_and_file_collector(changedfileset):
1477 1476 # This is an information gathering function that gathers
1478 1477 # information from each changeset node that goes out as part of
1479 1478 # the changegroup. The information gathered is a list of which
1480 1479 # manifest nodes are potentially required (the recipient may
1481 1480 # already have them) and total list of all files which were
1482 1481 # changed in any changeset in the changegroup.
1483 1482 #
1484 1483 # We also remember the first changenode we saw any manifest
1485 1484 # referenced by so we can later determine which changenode 'owns'
1486 1485 # the manifest.
1487 1486 def collect_manifests_and_files(clnode):
1488 1487 c = cl.read(clnode)
1489 1488 for f in c[3]:
1490 1489 # This is to make sure we keep only one
1491 1490 # instance of each filename string.
1492 1491 changedfileset.setdefault(f, f)
1493 1492 msng_mnfst_set.setdefault(c[0], clnode)
1494 1493 return collect_manifests_and_files
1495 1494
1496 1495 # Figure out which manifest nodes (of the ones we think might be part
1497 1496 # of the changegroup) the recipient must know about and remove them
1498 1497 # from the changegroup.
1499 1498 def prune_manifests():
1500 1499 has_mnfst_set = {}
1501 1500 for n in msng_mnfst_set:
1502 1501 # If a 'missing' manifest thinks it belongs to a changenode
1503 1502 # the recipient is assumed to have, obviously the recipient
1504 1503 # must have that manifest.
1505 1504 linknode = cl.node(mnfst.linkrev(n))
1506 1505 if linknode in has_cl_set:
1507 1506 has_mnfst_set[n] = 1
1508 1507 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1509 1508
1510 1509 # Use the information collected in collect_manifests_and_files to say
1511 1510 # which changenode any manifestnode belongs to.
1512 1511 def lookup_manifest_link(mnfstnode):
1513 1512 return msng_mnfst_set[mnfstnode]
1514 1513
1515 1514 # A function generating function that sets up the initial environment
1516 1515 # for the inner function.
1517 1516 def filenode_collector(changedfiles):
1518 1517 next_rev = [0]
1519 1518 # This gathers information from each manifestnode included in the
1520 1519 # changegroup about which filenodes the manifest node references
1521 1520 # so we can include those in the changegroup too.
1522 1521 #
1523 1522 # It also remembers which changenode each filenode belongs to. It
1524 1523 # does this by assuming that a filenode belongs to the changenode
1525 1524 # the first manifest that references it belongs to.
1526 1525 def collect_msng_filenodes(mnfstnode):
1527 1526 r = mnfst.rev(mnfstnode)
1528 1527 if r == next_rev[0]:
1529 1528 # If this rev immediately follows the last one we looked at,
1530 1529 # reading just the delta is enough.
1531 1530 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1532 1531 # For each line in the delta
1533 1532 for dline in delta.splitlines():
1534 1533 # get the filename and filenode for that line
1535 1534 f, fnode = dline.split('\0')
1536 1535 fnode = bin(fnode[:40])
1537 1536 f = changedfiles.get(f, None)
1538 1537 # And if the file is in the list of files we care
1539 1538 # about.
1540 1539 if f is not None:
1541 1540 # Get the changenode this manifest belongs to
1542 1541 clnode = msng_mnfst_set[mnfstnode]
1543 1542 # Create the set of filenodes for the file if
1544 1543 # there isn't one already.
1545 1544 ndset = msng_filenode_set.setdefault(f, {})
1546 1545 # And set the filenode's changelog node to the
1547 1546 # manifest's if it hasn't been set already.
1548 1547 ndset.setdefault(fnode, clnode)
1549 1548 else:
1550 1549 # Otherwise we need a full manifest.
1551 1550 m = mnfst.read(mnfstnode)
1552 1551 # For every file we care about.
1553 1552 for f in changedfiles:
1554 1553 fnode = m.get(f, None)
1555 1554 # If it's in the manifest
1556 1555 if fnode is not None:
1557 1556 # See comments above.
1558 1557 clnode = msng_mnfst_set[mnfstnode]
1559 1558 ndset = msng_filenode_set.setdefault(f, {})
1560 1559 ndset.setdefault(fnode, clnode)
1561 1560 # Remember the revision we hope to see next.
1562 1561 next_rev[0] = r + 1
1563 1562 return collect_msng_filenodes
1564 1563
1565 1564 # We have a list of filenodes we think we need for a file; let's remove
1566 1565 # all those we know the recipient must have.
1567 1566 def prune_filenodes(f, filerevlog):
1568 1567 msngset = msng_filenode_set[f]
1569 1568 hasset = {}
1570 1569 # If a 'missing' filenode thinks it belongs to a changenode we
1571 1570 # assume the recipient must have, then the recipient must have
1572 1571 # that filenode.
1573 1572 for n in msngset:
1574 1573 clnode = cl.node(filerevlog.linkrev(n))
1575 1574 if clnode in has_cl_set:
1576 1575 hasset[n] = 1
1577 1576 prune_parents(filerevlog, hasset, msngset)
1578 1577
1579 1578 # A function generating function that sets up a context for the
1580 1579 # inner function.
1581 1580 def lookup_filenode_link_func(fname):
1582 1581 msngset = msng_filenode_set[fname]
1583 1582 # Lookup the changenode the filenode belongs to.
1584 1583 def lookup_filenode_link(fnode):
1585 1584 return msngset[fnode]
1586 1585 return lookup_filenode_link
1587 1586
1588 1587 # Now that we have all these utility functions to help out and
1589 1588 # logically divide up the task, generate the group.
1590 1589 def gengroup():
1591 1590 # The set of changed files starts empty.
1592 1591 changedfiles = {}
1593 1592 # Create a changenode group generator that will call our functions
1594 1593 # back to lookup the owning changenode and collect information.
1595 1594 group = cl.group(msng_cl_lst, identity,
1596 1595 manifest_and_file_collector(changedfiles))
1597 1596 for chnk in group:
1598 1597 yield chnk
1599 1598
1600 1599 # The list of manifests has been collected by the generator
1601 1600 # calling our functions back.
1602 1601 prune_manifests()
1603 1602 msng_mnfst_lst = msng_mnfst_set.keys()
1604 1603 # Sort the manifestnodes by revision number.
1605 1604 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1606 1605 # Create a generator for the manifestnodes that calls our lookup
1607 1606 # and data collection functions back.
1608 1607 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1609 1608 filenode_collector(changedfiles))
1610 1609 for chnk in group:
1611 1610 yield chnk
1612 1611
1613 1612 # These are no longer needed, dereference and toss the memory for
1614 1613 # them.
1615 1614 msng_mnfst_lst = None
1616 1615 msng_mnfst_set.clear()
1617 1616
1618 1617 changedfiles = changedfiles.keys()
1619 1618 changedfiles.sort()
1620 1619 # Go through all our files in order sorted by name.
1621 1620 for fname in changedfiles:
1622 1621 filerevlog = self.file(fname)
1623 1622 # Toss out the filenodes that the recipient isn't really
1624 1623 # missing.
1625 1624 if msng_filenode_set.has_key(fname):
1626 1625 prune_filenodes(fname, filerevlog)
1627 1626 msng_filenode_lst = msng_filenode_set[fname].keys()
1628 1627 else:
1629 1628 msng_filenode_lst = []
1630 1629 # If any filenodes are left, generate the group for them,
1631 1630 # otherwise don't bother.
1632 1631 if len(msng_filenode_lst) > 0:
1633 1632 yield changegroup.genchunk(fname)
1634 1633 # Sort the filenodes by their revision #
1635 1634 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1636 1635 # Create a group generator and only pass in a changenode
1637 1636 # lookup function as we need to collect no information
1638 1637 # from filenodes.
1639 1638 group = filerevlog.group(msng_filenode_lst,
1640 1639 lookup_filenode_link_func(fname))
1641 1640 for chnk in group:
1642 1641 yield chnk
1643 1642 if msng_filenode_set.has_key(fname):
1644 1643 # Don't need this anymore, toss it to free memory.
1645 1644 del msng_filenode_set[fname]
1646 1645 # Signal that no more groups are left.
1647 1646 yield changegroup.closechunk()
1648 1647
1649 1648 if msng_cl_lst:
1650 1649 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1651 1650
1652 1651 return util.chunkbuffer(gengroup())
1653 1652
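# Hypothetical call-site sketch for changegroupsubset, only to make the
# bases/heads contract above concrete; `repo`, `base` and `head` are
# placeholders, not names from this file.
def bundle_between(repo, base, head):
    # stream every changeset that descends from `base` and is an
    # ancestor of `head`
    return repo.changegroupsubset([base], [head], 'bundle')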
1654 1653 def changegroup(self, basenodes, source):
1655 1654 """Generate a changegroup of all nodes that we have that a recipient
1656 1655 doesn't.
1657 1656
1658 1657 This is much easier than the previous function as we can assume that
1659 1658 the recipient has any changenode we aren't sending them."""
1660 1659
1661 1660 self.hook('preoutgoing', throw=True, source=source)
1662 1661
1663 1662 cl = self.changelog
1664 1663 nodes = cl.nodesbetween(basenodes, None)[0]
1665 1664 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1666 1665 self.changegroupinfo(nodes)
1667 1666
1668 1667 def identity(x):
1669 1668 return x
1670 1669
1671 1670 def gennodelst(revlog):
1672 1671 for r in xrange(0, revlog.count()):
1673 1672 n = revlog.node(r)
1674 1673 if revlog.linkrev(n) in revset:
1675 1674 yield n
1676 1675
1677 1676 def changed_file_collector(changedfileset):
1678 1677 def collect_changed_files(clnode):
1679 1678 c = cl.read(clnode)
1680 1679 for fname in c[3]:
1681 1680 changedfileset[fname] = 1
1682 1681 return collect_changed_files
1683 1682
1684 1683 def lookuprevlink_func(revlog):
1685 1684 def lookuprevlink(n):
1686 1685 return cl.node(revlog.linkrev(n))
1687 1686 return lookuprevlink
1688 1687
1689 1688 def gengroup():
1690 1689 # construct a list of all changed files
1691 1690 changedfiles = {}
1692 1691
1693 1692 for chnk in cl.group(nodes, identity,
1694 1693 changed_file_collector(changedfiles)):
1695 1694 yield chnk
1696 1695 changedfiles = changedfiles.keys()
1697 1696 changedfiles.sort()
1698 1697
1699 1698 mnfst = self.manifest
1700 1699 nodeiter = gennodelst(mnfst)
1701 1700 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1702 1701 yield chnk
1703 1702
1704 1703 for fname in changedfiles:
1705 1704 filerevlog = self.file(fname)
1706 1705 nodeiter = gennodelst(filerevlog)
1707 1706 nodeiter = list(nodeiter)
1708 1707 if nodeiter:
1709 1708 yield changegroup.genchunk(fname)
1710 1709 lookup = lookuprevlink_func(filerevlog)
1711 1710 for chnk in filerevlog.group(nodeiter, lookup):
1712 1711 yield chnk
1713 1712
1714 1713 yield changegroup.closechunk()
1715 1714
1716 1715 if nodes:
1717 1716 self.hook('outgoing', node=hex(nodes[0]), source=source)
1718 1717
1719 1718 return util.chunkbuffer(gengroup())
1720 1719
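# By contrast, a hedged sketch of calling the simpler generator above:
# the recipient is assumed to already have everything we are not sending,
# so only the base nodes need to be named (names are placeholders).
def outgoing_changegroup(repo, common):
    return repo.changegroup(common, 'push')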
1721 1720 def addchangegroup(self, source, srctype, url):
1722 1721 """add changegroup to repo.
1723 1722 returns the number of heads modified or added, plus one.
1724 1723
1725 1724 def csmap(x):
1726 1725 self.ui.debug(_("add changeset %s\n") % short(x))
1727 1726 return cl.count()
1728 1727
1729 1728 def revmap(x):
1730 1729 return cl.rev(x)
1731 1730
1732 1731 if not source:
1733 1732 return 0
1734 1733
1735 1734 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1736 1735
1737 1736 changesets = files = revisions = 0
1738 1737
1739 1738 tr = self.transaction()
1740 1739
1741 1740 # write changelog data to temp files so concurrent readers will not see
1742 1741 # inconsistent view
1743 1742 cl = None
1744 1743 try:
1745 1744 cl = appendfile.appendchangelog(self.sopener,
1746 1745 self.changelog.version)
1747 1746
1748 1747 oldheads = len(cl.heads())
1749 1748
1750 1749 # pull off the changeset group
1751 1750 self.ui.status(_("adding changesets\n"))
1752 1751 cor = cl.count() - 1
1753 1752 chunkiter = changegroup.chunkiter(source)
1754 1753 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1755 1754 raise util.Abort(_("received changelog group is empty"))
1756 1755 cnr = cl.count() - 1
1757 1756 changesets = cnr - cor
1758 1757
1759 1758 # pull off the manifest group
1760 1759 self.ui.status(_("adding manifests\n"))
1761 1760 chunkiter = changegroup.chunkiter(source)
1762 1761 # no need to check for empty manifest group here:
1763 1762 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1764 1763 # no new manifest will be created and the manifest group will
1765 1764 # be empty during the pull
1766 1765 self.manifest.addgroup(chunkiter, revmap, tr)
1767 1766
1768 1767 # process the files
1769 1768 self.ui.status(_("adding file changes\n"))
1770 1769 while 1:
1771 1770 f = changegroup.getchunk(source)
1772 1771 if not f:
1773 1772 break
1774 1773 self.ui.debug(_("adding %s revisions\n") % f)
1775 1774 fl = self.file(f)
1776 1775 o = fl.count()
1777 1776 chunkiter = changegroup.chunkiter(source)
1778 1777 if fl.addgroup(chunkiter, revmap, tr) is None:
1779 1778 raise util.Abort(_("received file revlog group is empty"))
1780 1779 revisions += fl.count() - o
1781 1780 files += 1
1782 1781
1783 1782 cl.writedata()
1784 1783 finally:
1785 1784 if cl:
1786 1785 cl.cleanup()
1787 1786
1788 1787 # make changelog see real files again
1789 1788 self.changelog = changelog.changelog(self.sopener,
1790 1789 self.changelog.version)
1791 1790 self.changelog.checkinlinesize(tr)
1792 1791
1793 1792 newheads = len(self.changelog.heads())
1794 1793 heads = ""
1795 1794 if oldheads and newheads != oldheads:
1796 1795 heads = _(" (%+d heads)") % (newheads - oldheads)
1797 1796
1798 1797 self.ui.status(_("added %d changesets"
1799 1798 " with %d changes to %d files%s\n")
1800 1799 % (changesets, revisions, files, heads))
1801 1800
1802 1801 if changesets > 0:
1803 1802 self.hook('pretxnchangegroup', throw=True,
1804 1803 node=hex(self.changelog.node(cor+1)), source=srctype,
1805 1804 url=url)
1806 1805
1807 1806 tr.close()
1808 1807
1809 1808 if changesets > 0:
1810 1809 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1811 1810 source=srctype, url=url)
1812 1811
1813 1812 for i in xrange(cor + 1, cnr + 1):
1814 1813 self.hook("incoming", node=hex(self.changelog.node(i)),
1815 1814 source=srctype, url=url)
1816 1815
1817 1816 return newheads - oldheads + 1
1818 1817
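# Assumed caller sketch for addchangegroup: since it returns (heads
# modified or added) + 1, a value greater than one means new heads
# appeared and a merge may be wanted, while zero means the source was
# empty. The helper name and the message are illustrative only.
def pull_and_report(repo, source, url):
    modheads = repo.addchangegroup(source, 'pull', url)
    if modheads > 1:
        repo.ui.status("(new heads; you may want to merge)\n")
    return modheads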
1819 1818
1820 1819 def stream_in(self, remote):
1821 1820 fp = remote.stream_out()
1822 1821 l = fp.readline()
1823 1822 try:
1824 1823 resp = int(l)
1825 1824 except ValueError:
1826 1825 raise util.UnexpectedOutput(
1827 1826 _('Unexpected response from remote server:'), l)
1828 1827 if resp == 1:
1829 1828 raise util.Abort(_('operation forbidden by server'))
1830 1829 elif resp == 2:
1831 1830 raise util.Abort(_('locking the remote repository failed'))
1832 1831 elif resp != 0:
1833 1832 raise util.Abort(_('the server sent an unknown error code'))
1834 1833 self.ui.status(_('streaming all changes\n'))
1835 1834 l = fp.readline()
1836 1835 try:
1837 1836 total_files, total_bytes = map(int, l.split(' ', 1))
1838 1837 except (ValueError, TypeError):
1839 1838 raise util.UnexpectedOutput(
1840 1839 _('Unexpected response from remote server:'), l)
1841 1840 self.ui.status(_('%d files to transfer, %s of data\n') %
1842 1841 (total_files, util.bytecount(total_bytes)))
1843 1842 start = time.time()
1844 1843 for i in xrange(total_files):
1845 1844 l = fp.readline()
1846 1845 try:
1847 1846 name, size = l.split('\0', 1)
1848 1847 size = int(size)
1849 1848 except (ValueError, TypeError):
1850 1849 raise util.UnexpectedOutput(
1851 1850 _('Unexpected response from remote server:'), l)
1852 1851 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1853 1852 ofp = self.sopener(name, 'w')
1854 1853 for chunk in util.filechunkiter(fp, limit=size):
1855 1854 ofp.write(chunk)
1856 1855 ofp.close()
1857 1856 elapsed = time.time() - start
1858 1857 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1859 1858 (util.bytecount(total_bytes), elapsed,
1860 1859 util.bytecount(total_bytes / elapsed)))
1861 1860 self.reload()
1862 1861 return len(self.heads()) + 1
1863 1862
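# A standalone sketch of the wire format stream_in consumes, assuming
# `fp` is a file-like object: one status line, one
# "total_files total_bytes" line, then for each file a "name\0size"
# header line followed by exactly `size` raw bytes.
def parse_stream(fp):
    resp = int(fp.readline())
    if resp != 0:
        raise ValueError('server refused stream: %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        size = int(size)
        yield name, fp.read(size)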
1864 1863 def clone(self, remote, heads=[], stream=False):
1865 1864 '''clone remote repository.
1866 1865
1867 1866 keyword arguments:
1868 1867 heads: list of revs to clone (forces use of pull)
1869 1868 stream: use streaming clone if possible'''
1870 1869
1871 1870 # now, all clients that can request uncompressed clones can
1872 1871 # read repo formats supported by all servers that can serve
1873 1872 # them.
1874 1873
1875 1874 # if revlog format changes, client will have to check version
1876 1875 # and format flags on "stream" capability, and use
1877 1876 # uncompressed only if compatible.
1878 1877
1879 1878 if stream and not heads and remote.capable('stream'):
1880 1879 return self.stream_in(remote)
1881 1880 return self.pull(remote, heads)
1882 1881
1883 1882 # used to avoid circular references so destructors work
1884 1883 def aftertrans(base):
1885 1884 p = base
1886 1885 def a():
1887 1886 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1888 1887 util.rename(os.path.join(p, "journal.dirstate"),
1889 1888 os.path.join(p, "undo.dirstate"))
1890 1889 return a
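# Presumed usage note: handing aftertrans(base) to the transaction
# machinery, instead of a bound repository method, keeps the transaction
# from holding a reference back to the repository, so the repository's
# destructor can still run; e.g. (illustrative only):
#     tr = transaction(..., aftertrans(self.path))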
1891 1890
1892 1891 def instance(ui, path, create):
1893 1892 return localrepository(ui, util.drop_scheme('file', path), create)
1894 1893
1895 1894 def islocal(path):
1896 1895 return True
@@ -1,9 +1,8
1 1 0
2 2 0
3 3 adding changesets
4 4 killed!
5 5 transaction abort!
6 6 rollback completed
7 7 00changelog.i
8 data
9 8 journal.dirstate
@@ -1,61 +1,61
1 1 #!/bin/sh
2 2
3 3 # This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 cat <<'EOF' > dummyssh
6 6 #!/bin/sh
7 7 # this attempts to deal with relative pathnames
8 8 cd `dirname $0`
9 9
10 10 # check for proper args
11 11 if [ "$1" != "user@dummy" ] ; then
12 12 exit -1
13 13 fi
14 14
15 15 # check that we're in the right directory
16 16 if [ ! -x dummyssh ] ; then
17 17 exit -1
18 18 fi
19 19
20 20 echo Got arguments 1:$1 2:$2 3:$3 4:$4 5:$5 >> dummylog
21 21 $2
22 22 EOF
23 23 chmod +x dummyssh
24 24
25 25 echo "# creating 'local'"
26 26 hg init local
27 27 echo this > local/foo
28 28 hg ci --cwd local -A -m "init" -d "1000000 0"
29 29
30 30 echo "#test failure"
31 31 hg init local
32 32
33 33 echo "# init+push to remote2"
34 34 hg init -e ./dummyssh ssh://user@dummy/remote2
35 35 hg incoming -R remote2 local
36 36 hg push -R local -e ./dummyssh ssh://user@dummy/remote2
37 37
38 38 echo "# clone to remote1"
39 39 hg clone -e ./dummyssh local ssh://user@dummy/remote1
40 40
41 41 echo "# init to existing repo"
42 42 hg init -e ./dummyssh ssh://user@dummy/remote1
43 43
44 44 echo "# clone to existing repo"
45 45 hg clone -e ./dummyssh local ssh://user@dummy/remote1
46 46
47 47 echo "# output of dummyssh"
48 48 cat dummylog
49 49
50 50 echo "# comparing repositories"
51 51 hg tip -q -R local
52 52 hg tip -q -R remote1
53 53 hg tip -q -R remote2
54 54
55 55 echo "# check names for repositories (clashes with URL schemes, special chars)"
56 56 for i in bundle file hg http https old-http ssh static-http " " "with space"; do
57 57 echo "# hg init \"$i\""
58 58 hg init "$i"
59 test -d "$i" -a -d "$i/.hg" -a -d "$i/.hg/data" && echo "ok" || echo "failed"
59 test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
60 60 done
61 61