Lines matching refs:fault in drivers/gpu/drm/nouveau/nouveau_svm.c
68 u8 fault;
70 } **fault;
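For orientation: the two fragments above are the tail of the per-buffer fault cache declaration. Reconstructed from the fields assigned at lines 478-486 below, the surrounding structure plausibly reads as follows (field widths not visible in this listing are assumptions):

	struct nouveau_svm_fault {
		u64 inst;	/* channel instance the fault was reported for */
		u64 addr;	/* faulting GPU virtual address */
		u64 time;
		u32 engine;
		u8  gpc;
		u8  hub;
		u8  access;	/* 0 = READ, 3 = PREFETCH (see lines 713-714) */
		u8  client;
		u8  fault;	/* low five bits of the raw info word (line 486) */
		struct nouveau_svmm *svmm;	/* presumably the unmatched line 69 */
	} **fault;	/* lazily-populated entry cache, sized at line 857 */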
173 * page fault) and maybe some other commands.
387 /* Issue fault replay for GPU to retry accesses that faulted previously. */
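A minimal sketch of what issuing the replay might look like, assuming it is a method call on the driver's VMM object via nvif_object_mthd(); the object path and the method ID are assumptions, not confirmed by this listing:

	static void
	nouveau_svm_fault_replay(struct nouveau_svm *svm)
	{
		SVM_DBG(svm, "replay");
		/* 0 stands in for a hypothetical FAULT_REPLAY method ID */
		WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
					 0, NULL, 0));
	}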
398 /* Cancel a replayable fault that could not be handled.
400 * Cancelling the fault will trigger recovery to reset the engine
420 struct nouveau_svm_fault *fault)
422 nouveau_svm_fault_cancel(svm, fault->inst,
423 fault->hub,
424 fault->gpc,
425 fault->client);
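Pieced together from lines 420-425, the wrapper above simply forwards the identifying fields of a cached entry to nouveau_svm_fault_cancel(); the static void signature and the first parameter are inferred from the call site at line 655:

	static void
	nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
				       struct nouveau_svm_fault *fault)
	{
		nouveau_svm_fault_cancel(svm, fault->inst,
					      fault->hub,
					      fault->gpc,
					      fault->client);
	}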
460 struct nouveau_svm_fault *fault;
468 if (!buffer->fault[buffer->fault_nr]) {
469 fault = kmalloc(sizeof(*fault), GFP_KERNEL);
470 if (WARN_ON(!fault)) {
474 buffer->fault[buffer->fault_nr] = fault;
477 fault = buffer->fault[buffer->fault_nr++];
478 fault->inst = inst;
479 fault->addr = (u64)addrhi << 32 | addrlo;
480 fault->time = (u64)timehi << 32 | timelo;
481 fault->engine = engine;
482 fault->gpc = gpc;
483 fault->hub = hub;
484 fault->access = (info & 0x000f0000) >> 16;
485 fault->client = client;
486 fault->fault = (info & 0x0000001f);
488 SVM_DBG(svm, "fault %016llx %016llx %02x",
489 fault->inst, fault->addr, fault->access);
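Lines 484 and 486 unpack two bit-fields from one hardware info word. As a standalone, runnable illustration of the same masking (the sample value is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t info   = 0x00031011;	/* arbitrary sample word */
		uint8_t  access = (info & 0x000f0000) >> 16;	/* -> 3 */
		uint8_t  reason =  info & 0x0000001f;		/* -> 17 */
		printf("access=%u reason=%u\n", (unsigned)access, (unsigned)reason);
		return 0;
	}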
530 /* Have HMM fault pages within the fault window to the GPU. */
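The comment at line 530 marks where the driver asks HMM to fault in the CPU pages backing the window. The HMM API has shifted across kernel releases, so the following is only a rough sketch against a recent kernel's hmm_range_fault(); the notifier wiring and the retry-on-invalidation loop the real driver needs are omitted, and the caller must hold the mmap lock for read:

	#include <linux/hmm.h>
	#include <linux/mmu_notifier.h>

	static int svm_fault_cpu_pages(struct mmu_interval_notifier *notifier,
				       unsigned long start, unsigned long limit,
				       unsigned long *hmm_pfns)
	{
		struct hmm_range range = {
			.notifier      = notifier,
			.start         = start,
			.end           = limit,
			.hmm_pfns      = hmm_pfns,	/* one slot per page */
			.default_flags = HMM_PFN_REQ_FAULT,
		};

		range.notifier_seq = mmu_interval_read_begin(notifier);
		return hmm_range_fault(&range);	/* 0 or -errno */
	}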
600 /* Parse available fault buffer entries into a cache, and update
603 SVM_DBG(svm, "fault handler");
619 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
625 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
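The sort at line 625 needs a comparator over the array of fault pointers. It is not captured by this search, but since the loop below groups entries by inst (lines 631-638) and then walks addresses upward, it presumably orders by instance first, then address. A sketch under that assumption:

	/* assumed comparator: contiguous, address-sorted runs per channel */
	static int
	nouveau_svm_fault_cmp(const void *a, const void *b)
	{
		const struct nouveau_svm_fault *fa =
			*(struct nouveau_svm_fault * const *)a;
		const struct nouveau_svm_fault *fb =
			*(struct nouveau_svm_fault * const *)b;

		if (fa->inst != fb->inst)
			return fa->inst < fb->inst ? -1 : 1;
		if (fa->addr != fb->addr)
			return fa->addr < fb->addr ? -1 : 1;
		return 0;
	}

Comparing explicitly rather than subtracting avoids truncating a 64-bit difference to the comparator's int return value.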
631 if (!svmm || buffer->fault[fi]->inst != inst) {
633 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
635 inst = buffer->fault[fi]->inst;
638 buffer->fault[fi]->svmm = svmm;
654 if (!(svmm = buffer->fault[fi]->svmm)) {
655 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
658 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
663 start = buffer->fault[fi]->addr;
674 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
678 /* Intersect fault window with the CPU VMA, cancelling
679 * the fault if the address is invalid.
687 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
695 if (buffer->fault[fi]->addr != start) {
696 SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
698 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
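The cancellations at lines 687 and 698 both fall out of clamping the fault window to the CPU VMA. Roughly, with helper names taken from the standard mm API and the locking elided (a sketch, not the driver's verbatim code):

	#include <linux/mm.h>

	/* clamp [start, limit) to the backing VMA; a fault address left
	 * outside the clamped window is invalid and must be cancelled.
	 * Caller holds the mmap lock for read. */
	static bool svm_clamp_window(struct mm_struct *mm,
				     u64 *start, u64 *limit, u64 addr)
	{
		struct vm_area_struct *vma;

		vma = find_vma_intersection(mm, *start, *limit);
		if (!vma)
			return false;	/* no backing VMA */

		*start = max_t(u64, *start, vma->vm_start);
		*limit = min_t(u64, *limit, vma->vm_end);
		return addr >= *start && addr < *limit;
	}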
703 * fault window, determining required pages and access
709 /* Determine required permissions based on GPU fault
713 if (buffer->fault[fn]->access != 0 /* READ. */ &&
714 buffer->fault[fn]->access != 3 /* PREFETCH. */) {
730 buffer->fault[fn]->svmm == svmm &&
731 buffer->fault[fn    ]->addr ==
732 buffer->fault[fn - 1]->addr);
734 /* If the next fault is outside the window, or all GPU
738 buffer->fault[fn]->svmm != svmm ||
739 buffer->fault[fn]->addr >= limit)
742 /* Fill in the gap between this fault and the next. */
743 fill = (buffer->fault[fn    ]->addr -
744 buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
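Taken together, lines 730-744 walk forward through the sorted cache: duplicates of the previous address are collapsed, the walk stops at an instance change or the window edge, and the page gap to each next fault is counted so it can be filled. In isolation the walk looks roughly like this (control flow simplified from the original do/while):

	for (fn = fi + 1; fn < buffer->fault_nr; fn++) {
		/* collapse repeated reports of the same address */
		if (buffer->fault[fn]->svmm == svmm &&
		    buffer->fault[fn]->addr == buffer->fault[fn - 1]->addr)
			continue;

		/* stop at an instance change or the end of the window */
		if (buffer->fault[fn]->svmm != svmm ||
		    buffer->fault[fn]->addr >= limit)
			break;

		/* pages between this fault and the previous one; the
		 * real code marks these as prefetch-only candidates */
		fill = (buffer->fault[fn]->addr -
			buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
	}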
749 SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
773 struct nouveau_svm_fault *fault = buffer->fault[fi++];
774 pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
778 fault->access != 0 && fault->access != 3)) {
779 nouveau_svm_fault_cancel_fault(svm, fault);
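The index arithmetic at line 774 maps a fault address to its page slot within the window; a tiny runnable check of the same computation (the addresses and the 4 KiB PAGE_SHIFT are made-up examples):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

	int main(void)
	{
		uint64_t window = 0x7f0000000000ULL;	/* args.i.p.addr */
		uint64_t addr   = 0x7f0000003000ULL;	/* fault->addr */
		/* prints pi = 3: the fault hits the window's fourth page */
		printf("pi = %llu\n",
		       (unsigned long long)((addr - window) >> PAGE_SHIFT));
		return 0;
	}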
786 /* Issue fault replay to the GPU. */
816 if (buffer->fault) {
817 for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
818 kfree(buffer->fault[i]);
819 kvfree(buffer->fault);
842 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
857 buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
858 if (!buffer->fault)
918 SVM_DBG(svm, "No supported fault buffer class");