/*
 * Industry-pack bus support functions.
 *
 * Copyright (C) 2011-2012 CERN (www.cern.ch)
 * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; version 2 of the License.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ipack.h>

#define to_ipack_dev(device) container_of(device, struct ipack_device, dev)
#define to_ipack_driver(drv) container_of(drv, struct ipack_driver, driver)

static DEFINE_IDA(ipack_ida);

static void ipack_device_release(struct device *dev)
{
	struct ipack_device *device = to_ipack_dev(dev);

	kfree(device->id);
	device->release(device);
}

static inline const struct ipack_device_id *
ipack_match_one_device(const struct ipack_device_id *id,
		       const struct ipack_device *device)
{
	if ((id->format == IPACK_ANY_FORMAT ||
	     id->format == device->id_format) &&
	    (id->vendor == IPACK_ANY_ID || id->vendor == device->id_vendor) &&
	    (id->device == IPACK_ANY_ID || id->device == device->id_device))
		return id;
	return NULL;
}

static const struct ipack_device_id *
ipack_match_id(const struct ipack_device_id *ids, struct ipack_device *idev)
{
	if (ids) {
		while (ids->vendor || ids->device) {
			if (ipack_match_one_device(ids, idev))
				return ids;
			ids++;
		}
	}
	return NULL;
}

static int ipack_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ipack_device *idev = to_ipack_dev(dev);
	struct ipack_driver *idrv = to_ipack_driver(drv);
	const struct ipack_device_id *found_id;

	found_id = ipack_match_id(idrv->id_table, idev);
	return found_id ? 1 : 0;
}

static int ipack_bus_probe(struct device *device)
{
	struct ipack_device *dev = to_ipack_dev(device);
	struct ipack_driver *drv = to_ipack_driver(device->driver);

	if (!drv->ops->probe)
		return -EINVAL;

	return drv->ops->probe(dev);
}

static int ipack_bus_remove(struct device *device)
{
	struct ipack_device *dev = to_ipack_dev(device);
	struct ipack_driver *drv = to_ipack_driver(device->driver);

	if (!drv->ops->remove)
		return -EINVAL;

	drv->ops->remove(dev);
	return 0;
}

static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ipack_device *idev;

	if (!dev)
		return -ENODEV;

	idev = to_ipack_dev(dev);

	if (add_uevent_var(env,
			   "MODALIAS=ipack:f%02Xv%08Xd%08X", idev->id_format,
			   idev->id_vendor, idev->id_device))
		return -ENOMEM;

	return 0;
}

#define ipack_device_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr,		\
	     char *buf)							\
{									\
	struct ipack_device *idev = to_ipack_dev(dev);			\
	return sprintf(buf, format_string, idev->field);		\
}

static ssize_t id_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	unsigned int i, c, l, s;
	struct ipack_device *idev = to_ipack_dev(dev);

	switch (idev->id_format) {
	case IPACK_ID_VERSION_1:
		l = 0x7; s = 1; break;
	case IPACK_ID_VERSION_2:
		l = 0xf; s = 2; break;
	default:
		return -EIO;
	}

	c = 0;
	for (i = 0; i < idev->id_avail; i++) {
		if (i > 0) {
			if ((i & l) == 0)
				buf[c++] = '\n';
			else if ((i & s) == 0)
				buf[c++] = ' ';
		}
		sprintf(&buf[c], "%02x", idev->id[i]);
		c += 2;
	}
	buf[c++] = '\n';
	return c;
}

static ssize_t id_vendor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ipack_device *idev = to_ipack_dev(dev);

	switch (idev->id_format) {
	case IPACK_ID_VERSION_1:
		return sprintf(buf, "0x%02x\n", idev->id_vendor);
	case IPACK_ID_VERSION_2:
		return sprintf(buf, "0x%06x\n", idev->id_vendor);
	default:
		return -EIO;
	}
}

static ssize_t id_device_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ipack_device *idev = to_ipack_dev(dev);

	switch (idev->id_format) {
	case IPACK_ID_VERSION_1:
		return sprintf(buf, "0x%02x\n", idev->id_device);
	case IPACK_ID_VERSION_2:
		return sprintf(buf, "0x%04x\n", idev->id_device);
	default:
		return -EIO;
	}
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ipack_device *idev = to_ipack_dev(dev);

	/* Keep this in sync with the MODALIAS format in ipack_uevent(). */
	return sprintf(buf, "ipack:f%02Xv%08Xd%08X", idev->id_format,
		       idev->id_vendor, idev->id_device);
}

ipack_device_attr(id_format, "0x%hhx\n");

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
static DEVICE_ATTR_RO(id_format);
static DEVICE_ATTR_RO(id_vendor);
static DEVICE_ATTR_RO(modalias);

static struct attribute *ipack_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_id_device.attr,
	&dev_attr_id_format.attr,
	&dev_attr_id_vendor.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ipack);

static struct bus_type ipack_bus_type = {
	.name       = "ipack",
	.probe      = ipack_bus_probe,
	.match      = ipack_bus_match,
	.remove     = ipack_bus_remove,
	.dev_groups = ipack_groups,
	.uevent     = ipack_uevent,
};

struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
					    const struct ipack_bus_ops *ops,
					    struct module *owner)
{
	int bus_nr;
	struct ipack_bus_device *bus;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return NULL;

	bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL);
	if (bus_nr < 0) {
		kfree(bus);
		return NULL;
	}

	bus->bus_nr = bus_nr;
	bus->parent = parent;
	bus->slots = slots;
	bus->ops = ops;
	bus->owner = owner;
	return bus;
}
EXPORT_SYMBOL_GPL(ipack_bus_register);

static int ipack_unregister_bus_member(struct device *dev, void *data)
{
	struct ipack_device *idev = to_ipack_dev(dev);
	struct ipack_bus_device *bus = data;

	if (idev->bus == bus)
		ipack_device_del(idev);

	return 1;
}

int ipack_bus_unregister(struct ipack_bus_device *bus)
{
	bus_for_each_dev(&ipack_bus_type, NULL, bus,
			 ipack_unregister_bus_member);
	ida_simple_remove(&ipack_ida, bus->bus_nr);
	kfree(bus);
	return 0;
}
EXPORT_SYMBOL_GPL(ipack_bus_unregister);

int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
			  const char *name)
{
	edrv->driver.owner = owner;
	edrv->driver.name = name;
	edrv->driver.bus = &ipack_bus_type;
	return driver_register(&edrv->driver);
}
EXPORT_SYMBOL_GPL(ipack_driver_register);

void ipack_driver_unregister(struct ipack_driver *edrv)
{
	driver_unregister(&edrv->driver);
}
EXPORT_SYMBOL_GPL(ipack_driver_unregister);

static u16 ipack_crc_byte(u16 crc, u8 c)
{
	int i;

	crc ^= c << 8;
	for (i = 0; i < 8; i++)
		crc = (crc << 1) ^ ((crc & 0x8000) ? 0x1021 : 0);
	return crc;
}

/*
 * The algorithm in lib/crc-ccitt.c does not seem to apply since it uses the
 * opposite bit ordering.
 */
static u8 ipack_calc_crc1(struct ipack_device *dev)
{
	u8 c;
	u16 crc;
	unsigned int i;

	crc = 0xffff;
	for (i = 0; i < dev->id_avail; i++) {
		/* The CRC field itself (byte 11) is taken as zero. */
		c = (i != 11) ? dev->id[i] : 0;
		crc = ipack_crc_byte(crc, c);
	}
	crc = ~crc;
	return crc & 0xff;
}

static u16 ipack_calc_crc2(struct ipack_device *dev)
{
	u8 c;
	u16 crc;
	unsigned int i;

	crc = 0xffff;
	for (i = 0; i < dev->id_avail; i++) {
		/* The CRC field itself (bytes 0x18-0x19) is taken as zero. */
		c = ((i != 0x18) && (i != 0x19)) ? dev->id[i] : 0;
		crc = ipack_crc_byte(crc, c);
	}
	crc = ~crc;
	return crc;
}

static void ipack_parse_id1(struct ipack_device *dev)
{
	u8 *id = dev->id;
	u8 crc;

	dev->id_vendor = id[4];
	dev->id_device = id[5];
	dev->speed_8mhz = 1;
	dev->speed_32mhz = (id[7] == 'H');
	crc = ipack_calc_crc1(dev);
	dev->id_crc_correct = (crc == id[11]);
	if (!dev->id_crc_correct) {
		dev_warn(&dev->dev, "ID CRC invalid found 0x%x, expected 0x%x.\n",
			 id[11], crc);
	}
}

static void ipack_parse_id2(struct ipack_device *dev)
{
	__be16 *id = (__be16 *) dev->id;
	u16 flags, crc;

	dev->id_vendor = ((be16_to_cpu(id[3]) & 0xff) << 16)
			 + be16_to_cpu(id[4]);
	dev->id_device = be16_to_cpu(id[5]);
	flags = be16_to_cpu(id[10]);
	dev->speed_8mhz = !!(flags & 2);
	dev->speed_32mhz = !!(flags & 4);
	crc = ipack_calc_crc2(dev);
	dev->id_crc_correct = (crc == be16_to_cpu(id[12]));
	if (!dev->id_crc_correct) {
		dev_warn(&dev->dev, "ID CRC invalid found 0x%x, expected 0x%x.\n",
			 be16_to_cpu(id[12]), crc);
	}
}

static int ipack_device_read_id(struct ipack_device *dev)
{
	u8 __iomem *idmem;
	int i;
	int ret = 0;

	idmem = ioremap(dev->region[IPACK_ID_SPACE].start,
			dev->region[IPACK_ID_SPACE].size);
	if (!idmem) {
		dev_err(&dev->dev, "error mapping memory\n");
		return -ENOMEM;
	}

	/* Determine ID PROM Data Format.  If we find the ids "IPAC" or "IPAH"
	 * we are dealing with an IndustryPack format 1 device.  If we detect
	 * "VITA4 " (16 bit big endian formatted) we are dealing with an
	 * IndustryPack format 2 device. */
	if ((ioread8(idmem + 1) == 'I') &&
	    (ioread8(idmem + 3) == 'P') &&
	    (ioread8(idmem + 5) == 'A') &&
	    ((ioread8(idmem + 7) == 'C') ||
	     (ioread8(idmem + 7) == 'H'))) {
		dev->id_format = IPACK_ID_VERSION_1;
		dev->id_avail = ioread8(idmem + 0x15);
		if ((dev->id_avail < 0x0c) || (dev->id_avail > 0x40)) {
			dev_warn(&dev->dev, "invalid id size");
			dev->id_avail = 0x0c;
		}
	} else if ((ioread8(idmem + 0) == 'I') &&
		   (ioread8(idmem + 1) == 'V') &&
		   (ioread8(idmem + 2) == 'A') &&
		   (ioread8(idmem + 3) == 'T') &&
		   (ioread8(idmem + 4) == ' ') &&
		   (ioread8(idmem + 5) == '4')) {
		dev->id_format = IPACK_ID_VERSION_2;
		dev->id_avail = ioread16be(idmem + 0x16);
		if ((dev->id_avail < 0x1a) || (dev->id_avail > 0x40)) {
			dev_warn(&dev->dev, "invalid id size");
			dev->id_avail = 0x1a;
		}
	} else {
		dev->id_format = IPACK_ID_VERSION_INVALID;
		dev->id_avail = 0;
	}

	if (!dev->id_avail) {
		ret = -ENODEV;
		goto out;
	}

	/* Obtain the amount of memory required to store a copy of the complete
	 * ID ROM contents. */
	dev->id = kmalloc(dev->id_avail, GFP_KERNEL);
	if (!dev->id) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < dev->id_avail; i++) {
		if (dev->id_format == IPACK_ID_VERSION_1)
			dev->id[i] = ioread8(idmem + (i << 1) + 1);
		else
			dev->id[i] = ioread8(idmem + i);
	}

	/* now we can finally work with the copy */
	switch (dev->id_format) {
	case IPACK_ID_VERSION_1:
		ipack_parse_id1(dev);
		break;
	case IPACK_ID_VERSION_2:
		ipack_parse_id2(dev);
		break;
	}

out:
	iounmap(idmem);

	return ret;
}

int ipack_device_init(struct ipack_device *dev)
{
	int ret;

	dev->dev.bus = &ipack_bus_type;
	dev->dev.release = ipack_device_release;
	dev->dev.parent = dev->bus->parent;
	dev_set_name(&dev->dev,
		     "ipack-dev.%u.%u", dev->bus->bus_nr, dev->slot);
	device_initialize(&dev->dev);

	if (dev->bus->ops->set_clockrate(dev, 8))
		dev_warn(&dev->dev,
			 "failed to switch to 8 MHz operation for reading of device ID.\n");
	if (dev->bus->ops->reset_timeout(dev))
		dev_warn(&dev->dev, "failed to reset potential timeout.\n");

	ret = ipack_device_read_id(dev);
	if (ret < 0) {
		dev_err(&dev->dev, "error reading device id section.\n");
		return ret;
	}

	/* if the device supports 32 MHz operation, use it. */
	if (dev->speed_32mhz) {
		ret = dev->bus->ops->set_clockrate(dev, 32);
		if (ret < 0)
			dev_err(&dev->dev,
				"failed to switch to 32 MHz operation.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipack_device_init);

int ipack_device_add(struct ipack_device *dev)
{
	return device_add(&dev->dev);
}
EXPORT_SYMBOL_GPL(ipack_device_add);

void ipack_device_del(struct ipack_device *dev)
{
	device_del(&dev->dev);
	ipack_put_device(dev);
}
EXPORT_SYMBOL_GPL(ipack_device_del);

void ipack_get_device(struct ipack_device *dev)
{
	get_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(ipack_get_device);

void ipack_put_device(struct ipack_device *dev)
{
	put_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(ipack_put_device);

static int __init ipack_init(void)
{
	ida_init(&ipack_ida);
	return bus_register(&ipack_bus_type);
}

static void __exit ipack_exit(void)
{
	bus_unregister(&ipack_bus_type);
	ida_destroy(&ipack_ida);
}

module_init(ipack_init);
module_exit(ipack_exit);

MODULE_AUTHOR("Samuel Iglesias Gonsalvez <siglesias@igalia.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Industry-pack bus core");
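
/*
 * Illustrative consumer sketch (hypothetical module, not part of this file).
 * A mezzanine driver fills in an ID table plus ops and registers itself; the
 * ipack_driver_ops type name and the probe/remove signatures are assumptions
 * inferred from how ipack_bus_probe() and ipack_bus_remove() above call them.
 * Vendor/device values are made up; the terminating entry relies on
 * ipack_match_id() stopping at vendor == device == 0.
 *
 *	static const struct ipack_device_id example_ids[] = {
 *		{ .format = IPACK_ANY_FORMAT, .vendor = 0xf0, .device = 0x22 },
 *		{ },
 *	};
 *
 *	static const struct ipack_driver_ops example_ops = {
 *		.probe  = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static struct ipack_driver example_driver = {
 *		.ops      = &example_ops,
 *		.id_table = example_ids,
 *	};
 *
 *	return ipack_driver_register(&example_driver, THIS_MODULE, "example");
 */
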
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants at least allocate/mmap to be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
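
/*
 * On 4 KiB pages (PAGE_SHIFT == 12) DRM_FILE_PAGE_OFFSET_START works out to
 * page 0x100000, i.e. byte offset 4 GiB, and the window spans roughly 64 GiB
 * of offset space -- beyond anything a 32-bit file offset can name, so a
 * fake GEM offset can never collide with an ordinary mapping.
 */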

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp)) {
		kfree(obj);
		return NULL;
	}

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		fput(obj->filp);
		kfree(obj);
		return NULL;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       int *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
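
/*
 * Illustrative driver-side lifecycle (hypothetical "create" ioctl, assuming
 * args->size is already page-aligned; it mirrors drm_gem_open_ioctl() below).
 * The handle takes its own reference, so the allocation reference is dropped
 * as soon as the handle exists.
 *
 *	struct drm_gem_object *obj;
 *	int ret, handle;
 *
 *	obj = drm_gem_object_alloc(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */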

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      int handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
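
/*
 * Illustrative caller pattern (this is exactly what drm_gem_flink_ioctl()
 * below does): a successful lookup takes a reference that the caller must
 * drop again under struct_mutex once it is done with the object.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -EBADF;
 *	...
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */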

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	int handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
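
/*
 * Illustrative userspace flow tying flink and open together (assuming the
 * standard DRM_IOCTL_GEM_FLINK/DRM_IOCTL_GEM_OPEN wrappers from drm.h;
 * error handling omitted):
 *
 *	exporting process:
 *		struct drm_gem_flink flink = { .handle = handle };
 *		ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *		send flink.name to the other process
 *
 *	importing process:
 *		struct drm_gem_open op = { .name = received_name };
 *		ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);
 *		op.handle and op.size now describe the shared object
 */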

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else {
		spin_unlock(&dev->object_name_lock);
	}
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);


/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;